diff --git a/go.mod b/go.mod
index a66891b1499..b1c36d2a16d 100644
--- a/go.mod
+++ b/go.mod
@@ -3,9 +3,8 @@ module github.com/Azure/ARO-RP
 go 1.14
 
 require (
-	cloud.google.com/go v0.77.0 // indirect
 	github.com/AlekSi/gocov-xml v0.0.0-20190121064608-3a14fb1c4737
-	github.com/Azure/azure-sdk-for-go v52.5.0+incompatible
+	github.com/Azure/azure-sdk-for-go v53.2.0+incompatible
 	github.com/Azure/go-autorest/autorest v0.11.18
 	github.com/Azure/go-autorest/autorest/adal v0.9.13
 	github.com/Azure/go-autorest/autorest/azure/auth v0.5.7
@@ -13,105 +12,121 @@ require (
 	github.com/Azure/go-autorest/autorest/to v0.4.0
 	github.com/Azure/go-autorest/autorest/validation v0.3.1
 	github.com/Azure/go-autorest/tracing v0.6.0
-	github.com/Microsoft/go-winio v0.4.16 // indirect
+	github.com/MakeNowJust/heredoc v1.0.0 // indirect
+	github.com/Microsoft/go-winio v0.4.17 // indirect
 	github.com/alvaroloes/enumer v1.1.2
 	github.com/apparentlymart/go-cidr v1.1.0
-	github.com/aws/aws-sdk-go v1.37.12 // indirect
+	github.com/aws/aws-sdk-go v1.38.20 // indirect
 	github.com/axw/gocov v1.0.0
 	github.com/clarketm/json v1.15.7 // indirect
 	github.com/codahale/etm v0.0.0-20141003032925-c00c9e6fb4c9
-	github.com/containers/image/v5 v5.10.2
+	github.com/containers/image/v5 v5.11.0
 	github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect
-	github.com/containers/ocicrypt v1.1.0 // indirect
-	github.com/containers/storage v1.25.0 // indirect
+	github.com/containers/ocicrypt v1.1.1 // indirect
+	github.com/containers/storage v1.29.0 // indirect
 	github.com/coreos/go-oidc v2.2.1+incompatible
-	github.com/coreos/go-systemd/v22 v22.1.0
+	github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
+	github.com/coreos/go-systemd/v22 v22.3.1
 	github.com/coreos/ignition/v2 v2.9.0 // indirect
-	github.com/docker/docker v20.10.3+incompatible // indirect
+	github.com/coreos/vcontext v0.0.0-20210407161507-4ee6c745c8bd // indirect
+	github.com/docker/docker v20.10.6+incompatible // indirect
 	github.com/docker/spdystream v0.2.0 // indirect
 	github.com/emicklei/go-restful v2.15.0+incompatible // indirect
+	github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
 	github.com/form3tech-oss/jwt-go v3.2.2+incompatible
 	github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32
 	github.com/go-bindata/go-bindata v3.1.2+incompatible
 	github.com/go-logr/logr v0.4.0
+	github.com/go-openapi/jsonpointer v0.19.5 // indirect
+	github.com/go-openapi/jsonreference v0.19.5 // indirect
 	github.com/go-openapi/spec v0.20.3 // indirect
-	github.com/go-playground/validator/v10 v10.4.1 // indirect
+	github.com/go-openapi/swag v0.19.15 // indirect
+	github.com/go-playground/validator/v10 v10.5.0 // indirect
 	github.com/go-test/deep v1.0.7
-	github.com/gofrs/uuid v3.3.0+incompatible
-	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang/mock v1.4.4
+	github.com/gofrs/uuid v4.0.0+incompatible
+	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+	github.com/golang/mock v1.5.0
 	github.com/golangci/golangci-lint v1.32.2
-	github.com/google/go-cmp v0.5.4
+	github.com/google/btree v1.0.1 // indirect
+	github.com/google/go-cmp v0.5.5
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/uuid v1.2.0 // indirect
 	github.com/googleapis/gnostic v0.5.4
-	github.com/gophercloud/utils v0.0.0-20210216074907-f6de111f2eae // indirect
+	github.com/gophercloud/gophercloud v0.17.0 // indirect
+	github.com/gophercloud/utils v0.0.0-20210323225332-7b186010c04f // indirect
 	github.com/gorilla/csrf v1.7.0
 	github.com/gorilla/mux v1.8.0
 	github.com/gorilla/securecookie v1.1.1
 	github.com/gorilla/sessions v1.2.1
 	github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
 	github.com/h2non/filetype v1.1.1 // indirect
+	github.com/hashicorp/errwrap v1.1.0 // indirect
+	github.com/imdario/mergo v0.3.12 // indirect
 	github.com/jim-minter/go-cosmosdb v0.0.0-20201119201311-b37af9b82812
 	github.com/jstemmer/go-junit-report v0.9.1
+	github.com/klauspost/compress v1.12.1 // indirect
 	github.com/leodido/go-urn v1.2.1 // indirect
 	github.com/libvirt/libvirt-go v7.0.0+incompatible // indirect
-	github.com/magefile/mage v1.11.0 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
-	github.com/mattn/go-runewidth v0.0.10 // indirect
+	github.com/mattn/go-runewidth v0.0.12 // indirect
 	github.com/metal3-io/cluster-api-provider-baremetal v0.2.2 // indirect
 	github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
-	github.com/nxadm/tail v1.4.8 // indirect
-	github.com/onsi/ginkgo v1.15.0
-	github.com/onsi/gomega v1.10.5
+	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
+	github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect
+	github.com/onsi/ginkgo v1.16.1
+	github.com/onsi/gomega v1.11.0
 	github.com/openshift/api v3.9.1-0.20191111211345-a27ff30ebf09+incompatible
 	github.com/openshift/client-go v0.0.0-20200827190008-3062137373b5
-	github.com/openshift/cloud-credential-operator v0.0.0-20210217002926-dce99f70f22c // indirect
-	github.com/openshift/cluster-api-provider-baremetal v0.0.0-20210126223742-4e254d08e0ad // indirect
-	github.com/openshift/console-operator v0.0.0-20210216151626-6e1cbc849915
+	github.com/openshift/cloud-credential-operator v0.0.0-20210415025441-3b263dd9f565 // indirect
+	github.com/openshift/cluster-api-provider-baremetal v0.0.0-20210322184723-0e61e5b3c9a5 // indirect
+	github.com/openshift/console-operator v0.0.0-20210414121046-73358d9408c4
 	github.com/openshift/installer v0.16.1
-	github.com/openshift/library-go v0.0.0-20200909173121-1d055d971916
-	github.com/openshift/machine-api-operator v0.2.1-0.20210104142355-8e6ae0acdfcf
+	github.com/openshift/library-go v0.0.0-20210414082648-6e767630a0dc
+	github.com/openshift/machine-api-operator v0.2.1-0.20210212025836-cb508cd8777d
 	github.com/openshift/machine-config-operator v4.2.0-alpha.0.0.20190917115525-033375cbe820+incompatible
-	github.com/operator-framework/operator-sdk v1.4.2
+	github.com/operator-framework/operator-sdk v1.6.1
 	github.com/ovirt/go-ovirt v0.0.0-20210112072624-e4d3b104de71 // indirect
 	github.com/pborman/uuid v1.2.1 // indirect
-	github.com/pquerna/cachecontrol v0.0.0-20201205024021-ac21108117ac // indirect
-	github.com/prometheus/client_golang v1.9.0 // indirect
-	github.com/prometheus/common v0.15.0
-	github.com/prometheus/procfs v0.6.0 // indirect
-	github.com/rivo/uniseg v0.2.0 // indirect
-	github.com/sirupsen/logrus v1.7.1
-	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
-	github.com/ugorji/go/codec v1.2.5-0.20210320190651-a2bb12368408
-	github.com/ulikunitz/xz v0.5.10 // indirect
-	github.com/vmware/govmomi v0.24.0 // indirect
-	go.opencensus.io v0.22.6 // indirect
-	golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
-	golang.org/x/net v0.0.0-20210119194325-5f4716e94777
-	golang.org/x/oauth2 v0.0.0-20210216194517-16ff1888fd2e
-	golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
-	golang.org/x/sys v0.0.0-20210217105451-b926d437f341 // indirect
-	golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf // indirect
-	golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 // indirect
+	github.com/pquerna/cachecontrol v0.1.0 // indirect
+	github.com/prometheus/client_golang v1.10.0 // indirect
+	github.com/prometheus/common v0.20.0
+	github.com/russross/blackfriday v1.6.0 // indirect
+	github.com/sirupsen/logrus v1.8.1
+	github.com/spf13/cobra v1.1.3 // indirect
+	github.com/ugorji/go/codec v1.2.5
+	github.com/vmware/govmomi v0.24.1 // indirect
+	golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2
+	golang.org/x/mod v0.4.2 // indirect
+	golang.org/x/net v0.0.0-20210414194228-064579744ee0
+	golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78
+	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
+	golang.org/x/sys v0.0.0-20210415045647-66c3f260301c // indirect
+	golang.org/x/term v0.0.0-20210406210042-72f3dc4e9b72 // indirect
+	golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
 	golang.org/x/tools v0.1.0
 	gomodules.xyz/jsonpatch/v2 v2.1.0 // indirect
+	google.golang.org/api v0.44.0 // indirect
+	google.golang.org/genproto v0.0.0-20210414175830-92282443c685 // indirect
+	google.golang.org/grpc v1.37.0 // indirect
 	gopkg.in/ini.v1 v1.62.0 // indirect
 	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
-	k8s.io/api v0.20.2
-	k8s.io/apiextensions-apiserver v0.20.2
-	k8s.io/apimachinery v0.20.2
+	k8s.io/api v0.21.0
+	k8s.io/apiextensions-apiserver v0.21.0
+	k8s.io/apimachinery v0.21.0
+	k8s.io/apiserver v0.21.0 // indirect
+	k8s.io/cli-runtime v0.21.0 // indirect
 	k8s.io/client-go v12.0.0+incompatible
 	k8s.io/code-generator v0.19.4
-	k8s.io/klog/v2 v2.5.0 // indirect
-	k8s.io/kube-openapi v0.0.0-20210216185858-15cd8face8d6 // indirect
-	k8s.io/kubectl v0.19.4
-	k8s.io/kubernetes v1.13.0
-	k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 // indirect
+	k8s.io/component-base v0.21.0 // indirect
+	k8s.io/klog/v2 v2.8.0 // indirect
+	k8s.io/kube-openapi v0.0.0-20210323165736-1a6458611d18 // indirect
+	k8s.io/kubectl v0.21.0
+	k8s.io/kubernetes v1.21.0
+	k8s.io/utils v0.0.0-20210305010621-2afb4311ab10 // indirect
 	sigs.k8s.io/cluster-api-provider-aws v0.6.4 // indirect
-	sigs.k8s.io/cluster-api-provider-azure v0.4.12
-	sigs.k8s.io/cluster-api-provider-openstack v0.3.3 // indirect
-	sigs.k8s.io/controller-runtime v0.8.2
+	sigs.k8s.io/cluster-api-provider-azure v0.4.14
+	sigs.k8s.io/cluster-api-provider-openstack v0.3.4 // indirect
+	sigs.k8s.io/controller-runtime v0.8.3
 	sigs.k8s.io/controller-tools v0.5.0
 )
 
@@ -181,18 +196,18 @@ replace (
 	github.com/docker/spdystream => github.com/docker/spdystream v0.1.0
 	github.com/go-openapi/spec => github.com/go-openapi/spec v0.19.8
 	github.com/metal3-io/baremetal-operator => github.com/openshift/baremetal-operator v0.0.0-20210128152529-b4b10a088a0c
-	github.com/metal3-io/cluster-api-provider-baremetal => github.com/openshift/cluster-api-provider-baremetal v0.0.0-20201105032354-fcd9e769a45c
+	github.com/metal3-io/cluster-api-provider-baremetal => github.com/openshift/cluster-api-provider-baremetal v0.0.0-20210304234017-16b67b78b538
 	github.com/openshift/api => github.com/openshift/api v0.0.0-20210127195806-54e5e88cf848
 	github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20200929181438-91d71ef2122c
 	github.com/openshift/cloud-credential-operator => github.com/openshift/cloud-credential-operator v0.0.0-20201202215507-371eb009d9a1
 	github.com/openshift/cluster-api => github.com/openshift/cluster-api v0.0.0-20191129101638-b09907ac6668
-	github.com/openshift/cluster-api-provider-gcp => github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201002153134-a0fc9aa4ce81
-	github.com/openshift/cluster-api-provider-libvirt => github.com/openshift/cluster-api-provider-libvirt v0.2.1-0.20200919090150-1ca52adab176
+	github.com/openshift/cluster-api-provider-gcp => github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20210324200451-9b15b1192f37
+	github.com/openshift/cluster-api-provider-libvirt => github.com/openshift/cluster-api-provider-libvirt v0.2.1-0.20210326022320-b3e2718f198e
 	github.com/openshift/cluster-api-provider-ovirt => github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20210210114935-91f12f3f7dee
 	github.com/openshift/console-operator => github.com/openshift/console-operator v0.0.0-20210116095614-7fd78a283616
 	github.com/openshift/installer => github.com/jim-minter/installer v0.9.0-master.0.20210407195006-452c09da58ae
-	github.com/openshift/machine-api-operator => github.com/openshift/machine-api-operator v0.2.1-0.20210212025836-cb508cd8777d
-	github.com/openshift/machine-config-operator => github.com/openshift/machine-config-operator v0.0.1-0.20210211205336-14a2b82d9f4c
+	github.com/openshift/machine-api-operator => github.com/openshift/machine-api-operator v0.2.1-0.20210304062120-b5fb76b27015
+	github.com/openshift/machine-config-operator => github.com/openshift/machine-config-operator v0.0.1-0.20210331171350-d5dc2b519aed
 	github.com/operator-framework/operator-sdk => github.com/operator-framework/operator-sdk v0.19.4
 	// https://www.whitesourcesoftware.com/vulnerability-database/WS-2018-0594
 	github.com/satori/go.uuid => github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b
@@ -225,10 +240,12 @@ replace (
 	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.19.0-rc.2
 	k8s.io/metrics => k8s.io/metrics v0.19.0-rc.2
 	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.19.0-rc.2
-	sigs.k8s.io/cluster-api-provider-aws => github.com/openshift/cluster-api-provider-aws v0.2.1-0.20201002185235-b1a6ba661ed8
-	sigs.k8s.io/cluster-api-provider-azure => github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20201119004617-db9109863f2f
-	sigs.k8s.io/cluster-api-provider-gcp => github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201002153134-a0fc9aa4ce81
-	sigs.k8s.io/cluster-api-provider-openstack => github.com/openshift/cluster-api-provider-openstack v0.0.0-20201002114634-3622a0ce6b56
+	sigs.k8s.io/cluster-api-provider-aws => github.com/openshift/cluster-api-provider-aws v0.2.1-0.20210325053523-878b3a16e964
+	sigs.k8s.io/cluster-api-provider-azure => github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20210324211250-7a14c7f1a41a
+	sigs.k8s.io/cluster-api-provider-gcp => github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20210324200451-9b15b1192f37
+	sigs.k8s.io/cluster-api-provider-openstack => github.com/openshift/cluster-api-provider-openstack v0.0.0-20210318183513-48b73415908a
 	sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.6.4
 	sigs.k8s.io/controller-tools => sigs.k8s.io/controller-tools v0.5.0
 )
+
+replace github.com/openshift/library-go => github.com/openshift/library-go v0.0.0-20210204141222-0e7715cd7725
diff --git a/go.sum b/go.sum
index 61549876e4c..008f53b7861 100644
--- a/go.sum
+++ b/go.sum
@@ -26,8 +26,10 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY
 cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
 cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
 cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
-cloud.google.com/go v0.77.0 h1:qA5V5+uQf6Mgr+tmFI8UT3D/ELyhIYkPwNGao/3Y+sQ= -cloud.google.com/go v0.77.0/go.mod h1:R8fYSLIilC247Iu8WS2OGHw1E/Ufn7Pd7HiDjTqiURs= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -77,8 +79,8 @@ github.com/Azure/azure-sdk-for-go v42.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9mo github.com/Azure/azure-sdk-for-go v42.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v43.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v43.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v52.5.0+incompatible h1:/NLBWHCnIHtZyLPc1P7WIqi4Te4CC23kIQyK3Ep/7lA= -github.com/Azure/azure-sdk-for-go v52.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v53.2.0+incompatible h1:XaOWAbkI9QeaO/YTtmz8Pt+9rLCrLyYdR9AsvP2irSQ= +github.com/Azure/azure-sdk-for-go v53.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= @@ -163,6 +165,8 @@ github.com/Jeffail/gabs v1.1.1/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/ github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd h1:sjQovDkwrZp8u+gxLtPgKGjk5hCxuy2hrRejBTA9xFU= github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= @@ -174,13 +178,23 @@ github.com/Masterminds/squirrel v1.2.0/go.mod h1:yaPeOnPG5ZRwL9oKdTsO/prlkPbXWZl github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.13/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod 
h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w= +github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= +github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= +github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc= @@ -228,6 +242,7 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190329064014-6e358769c32a/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA= github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190412020505-60e2075261b6/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA= github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ= @@ -290,8 +305,8 @@ github.com/aws/aws-sdk-go v1.30.5/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU github.com/aws/aws-sdk-go v1.30.24/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.30.28/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.32.3/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go v1.37.12 h1:rPdjZTlzHn+sbLEO+i535g+WpGf7QBDLYI7rDok+FHo= -github.com/aws/aws-sdk-go v1.37.12/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.38.20 h1:QbzNx/tdfATbdKfubBpkt84OM6oBkxQZRw6+bW2GyeA= +github.com/aws/aws-sdk-go v1.38.20/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod 
h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/axw/gocov v1.0.0 h1:YsqYR66hUmilVr23tu8USgnJIJvnwh3n7j5zRn7x4LU= github.com/axw/gocov v1.0.0/go.mod h1:LvQpEYiwwIb2nYkXY2fDWhg9/AsYqkhmrCshjlUJECE= @@ -332,6 +347,7 @@ github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR github.com/bsm/go-vlq v0.0.0-20150828105119-ec6e8d4f5f4e/go.mod h1:N+BjUcTjSxc2mtRGSCPsat1kze3CUtvJN3/jTXlp29k= github.com/btubbs/datetime v0.1.0/go.mod h1:n2BZ/2ltnRzNiz27aE3wUb2onNttQdC+WFxAoks5jJM= github.com/btubbs/datetime v0.1.1/go.mod h1:n2BZ/2ltnRzNiz27aE3wUb2onNttQdC+WFxAoks5jJM= +github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/bugsnag-go v1.5.3/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= @@ -357,6 +373,7 @@ github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0/go.mod h1:5d8DqS60xkj9k3aXfL3+mXBH0DPYO0FQjcKosxl+b/Q= @@ -367,6 +384,7 @@ github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmE github.com/cilium/ebpf v0.0.0-20200507155900-a9f01edf17e3/go.mod h1:XT+cAw5wfvsodedcijoh1l9cf7v1x9FlFB/3VmF/O8s= github.com/cilium/ebpf v0.0.0-20200601085316-9f1617e5c574/go.mod h1:XT+cAw5wfvsodedcijoh1l9cf7v1x9FlFB/3VmF/O8s= github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clarketm/json v1.14.1/go.mod h1:ynr2LRfb0fQU34l07csRNBTcivjySLLiY1YzQqKVfdo= @@ -389,53 +407,102 @@ github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcju github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= github.com/community-terraform-providers/terraform-provider-ignition/v2 v2.1.0/go.mod h1:0reAZvVLhna+mtZ5RcHH4W8iGwM7ZEAK3Y8TCgn9+ZQ= github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= +github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= +github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= +github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs 
v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= +github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= +github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= +github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb/go.mod 
h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= +github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= +github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= +github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= +github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= +github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= github.com/containerd/ttrpc v1.0.0/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= github.com/containerd/typeurl v1.0.0/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= +github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= +github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= +github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod 
h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= github.com/containers/image v3.0.2+incompatible h1:B1lqAE8MUPCrsBLE86J0gnXleeRq8zJnQryhiiGQNyE= github.com/containers/image v3.0.2+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M= github.com/containers/image/v5 v5.5.1/go.mod h1:4PyNYR0nwlGq/ybVJD9hWlhmIsNra4Q8uOQX2s6E2uM= -github.com/containers/image/v5 v5.10.2 h1:STD9GYR9p/X0qTLmBYsyx8dEM7zQW+qZ8KHoL/64fkg= -github.com/containers/image/v5 v5.10.2/go.mod h1:JlRLJZv7elVbtHaaaR6Kz8i6G3k2ttj4t7fubwxD9Hs= +github.com/containers/image/v5 v5.11.0 h1:SwxGucW1AZ8H/5KH9jW70lo9WyuOrtxafutyQ9RPPLw= +github.com/containers/image/v5 v5.11.0/go.mod h1:dCbUB4w6gmxIEOCsE0tZQppr8iBoXb4Evr74ZKlmwoI= github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU= github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= +github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= github.com/containers/ocicrypt v1.0.2/go.mod h1:nsOhbP19flrX6rE7ieGFvBlr7modwmNjsqWarIUce4M= -github.com/containers/ocicrypt v1.0.3/go.mod h1:CUBa+8MRNL/VkpxYIpaMtgn1WgXGyvPQj8jcy0EVG6g= github.com/containers/ocicrypt v1.1.0 h1:A6UzSUFMla92uxO43O6lm86i7evMGjTY7wTKB2DyGPY= github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= +github.com/containers/ocicrypt v1.1.1 h1:prL8l9w3ntVqXvNH1CiNn5ENjcCnr38JqpSyvKKB4GI= +github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= github.com/containers/storage v1.20.2/go.mod h1:oOB9Ie8OVPojvoaKWEGSEtHbXUAs+tSyr7RO7ZGteMc= -github.com/containers/storage v1.24.5/go.mod h1:YC+2pY8SkfEAcZkwycxYbpK8EiRbx5soPPwz9dxe4IQ= -github.com/containers/storage v1.25.0 h1:p0PLlQcWmtE+7XLfOCR0WuYyMTby1yozpI4DaKOtWTA= -github.com/containers/storage v1.25.0/go.mod h1:UxTYd5F4mPVqmDRcRL0PBS8+HP74aBn96eahnhEvPtk= +github.com/containers/storage v1.28.1/go.mod h1:5bwiMh2LkrN3AWIfDFMH7A/xbVNLcve+oeXYvHvW8cc= +github.com/containers/storage v1.29.0 h1:l3Vh6+IiMKLGfQZ3rDkF84m+KF1Qb0XEcilWC+pYo2o= +github.com/containers/storage v1.29.0/go.mod h1:u84RU4CCufGeJBNTRNwMB+FoE+AiFeFw4SsMoqAOeCM= github.com/coredns/corefile-migration v1.0.10/go.mod h1:RMy/mXdeDlYwzt0vdMEJvT2hGJ2I86/eO0UdXmH9XNI= github.com/coreos/bbolt v1.3.0/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/container-linux-config-transpiler v0.9.0/go.mod h1:SlcxXZQ2c42knj8pezMiQsM1f+ADxFMjGetuMKR/YSQ= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/fcct v0.5.0/go.mod h1:cbE+j77YSQwFB2fozWVB3qsI2Pi3YiVEbDz/b6Yywdo= github.com/coreos/go-etcd 
v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-json v0.0.0-20170920214419-6a2fe990e083/go.mod h1:FmxyHfvrCFfCsXRylD4QQRlQmvzl+DG6iTHyEEykPfU= github.com/coreos/go-json v0.0.0-20200220154158-5ae607161559/go.mod h1:FmxyHfvrCFfCsXRylD4QQRlQmvzl+DG6iTHyEEykPfU= github.com/coreos/go-oidc v2.0.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= @@ -445,6 +512,7 @@ github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHo github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= @@ -452,9 +520,13 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= +github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0 h1:kq/SbG2BCKLkDKkjQf5OWwKWUKj1lgs3lFI4PxnR5lg= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.1 h1:7OO2CXWMYNDdaAzP51t4lCCZWwpQHmvPbm9sxWjm3So= +github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/ign-converter v0.0.0-20200629171308-e40a44f244c5/go.mod h1:LNu0WTt8iVH/WJH15R/SjZw7AdyY2qAyf9ILZTCBvho= github.com/coreos/ignition v0.33.0/go.mod h1:WJQapxzEn9DE0ryxsGvm8QnBajm/XsS/PkrDqSpz+bA= github.com/coreos/ignition v0.34.0/go.mod h1:WJQapxzEn9DE0ryxsGvm8QnBajm/XsS/PkrDqSpz+bA= @@ -472,12 +544,16 @@ github.com/coreos/vcontext v0.0.0-20191017033345-260217907eb5/go.mod h1:E+6hug9b github.com/coreos/vcontext v0.0.0-20200225161404-ee043618d38d/go.mod h1:z4pMVvaUrxs98RROlIYdAQCKhEicjnTirOaVyDRH5h8= github.com/coreos/vcontext v0.0.0-20201120045928-b0e13dab675c h1:jA28WeORitsxGFVWhyWB06sAG2HbLHPQuHwDydhU2CQ= github.com/coreos/vcontext v0.0.0-20201120045928-b0e13dab675c/go.mod h1:z4pMVvaUrxs98RROlIYdAQCKhEicjnTirOaVyDRH5h8= +github.com/coreos/vcontext v0.0.0-20210407161507-4ee6c745c8bd h1:iHdPNrKIKSSlAy6nCmBliwn7xFTDu+OoFkArqMTr6Zc= +github.com/coreos/vcontext v0.0.0-20210407161507-4ee6c745c8bd/go.mod h1:z4pMVvaUrxs98RROlIYdAQCKhEicjnTirOaVyDRH5h8= github.com/cpuguy83/go-md2man v1.0.10/go.mod 
h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg= @@ -489,6 +565,10 @@ github.com/cznic/ql v1.2.0/go.mod h1:FbpzhyZrqr0PVlK6ury+PoW3T0ODUV22OeWIxcaOrSE github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ= github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= github.com/cznic/zappy v0.0.0-20160723133515-2533cb5b45cc/go.mod h1:Y1SNZ4dRUOKXshKUbwUapqNncRrho4mkjQebgEHZLj8= +github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= +github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= +github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= +github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= github.com/daixiang0/gci v0.2.4 h1:BUCKk5nlK2m+kRIsoj+wb/5hazHvHeZieBKWd9Afa8Q= github.com/daixiang0/gci v0.2.4/go.mod h1:+AV8KmHTGxxwp/pY84TLQfFKp2vuKXXJVzF3kD/hfR4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -518,6 +598,7 @@ github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20180920194744-16128bbac47f/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v0.0.0-20191216044856-a8371794149d/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= @@ -529,13 +610,14 @@ github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r github.com/docker/docker v1.4.2-0.20191219165747-a9416c67da9f/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v1.4.2-0.20200309214505-aa6a9891b09c/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.3+incompatible 
h1:+HS4XO73J41FpA260ztGujJ+0WibrA2TPJEnWNSyGNE= -github.com/docker/docker v20.10.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.6+incompatible h1:oXI3Vas8TI8Eu/EjH4srKHJBVqraSzJybhxY7Om9faQ= +github.com/docker/docker v20.10.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= @@ -588,16 +670,18 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= github.com/evanphx/json-patch v0.0.0-20190815234213-e83c0a1c26c8/go.mod h1:pmLOTb3x90VhIKxsA9yeQG5yfOkkKnkk1h+Ql8NDYDw= github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -692,6 +776,8 @@ github.com/go-openapi/jsonpointer v0.18.0/go.mod 
h1:cOnomiV+CVVwFLk0A/MExoFMjwds github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.17.2/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= @@ -699,6 +785,8 @@ github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3Hfo github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.17.2/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= @@ -724,6 +812,8 @@ github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/ github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/validate v0.17.2/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= @@ -736,12 +826,13 @@ github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTM github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= -github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE= -github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= +github.com/go-playground/validator/v10 v10.5.0 h1:X9rflw/KmpACwT8zdrm1upefpvdy6ur8d1kWyq6sg3E= +github.com/go-playground/validator/v10 v10.5.0/go.mod h1:xm76BBt941f7yWdGnI2DVPFFg1UK3YY04qifoXU3lOk= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod 
h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= @@ -788,9 +879,12 @@ github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= github.com/gocql/gocql v0.0.0-20190402132108-0e1d5de854df/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= +github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20181025153459-66d97aec3384/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/flock v0.8.0 h1:MSdYClljsF3PbENUUEx85nkWfJSGfzYI9yEBZOJz6CY= @@ -798,13 +892,18 @@ github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14j github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= 
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -822,6 +921,8 @@ github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.0.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -832,6 +933,8 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -848,9 +951,15 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= @@ -901,6 +1010,8 @@ github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Z github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod 
h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/cadvisor v0.37.0/go.mod h1:OhDE+goNVel0eGY8mR7Ifq1QUI1in5vJBIgIpcajK/I= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -913,7 +1024,10 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y= github.com/google/go-metrics-stackdriver v0.0.0-20190816035513-b52628e82e2a/go.mod h1:o93WzqysX0jP/10Y13hfL6aq9RoUvGaVdkrH5awMksE= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= @@ -937,6 +1051,7 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v0.0.0-20170306145142-6a5e28554805/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -971,13 +1086,15 @@ github.com/gophercloud/gophercloud v0.10.1-0.20200424014253-c3bfe50899e5/go.mod github.com/gophercloud/gophercloud v0.12.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= github.com/gophercloud/gophercloud v0.15.1-0.20210202035223-633d73521055 h1:/wFA5WAzWcHNjpUs/Vuf4g2Sbv0wj3MUkZdeYgU4RkI= github.com/gophercloud/gophercloud v0.15.1-0.20210202035223-633d73521055/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= +github.com/gophercloud/gophercloud v0.17.0 h1:BgVw0saxyeHWH5us/SQe1ltp0GRnytjmOLXDA8pO77E= +github.com/gophercloud/gophercloud v0.17.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= github.com/gophercloud/utils v0.0.0-20190124231947-9c3b9f2457ef/go.mod h1:wjDF8z83zTeg5eMLml5EBSlAhbF7G8DobyI1YsMuyzw= github.com/gophercloud/utils v0.0.0-20190128072930-fbb6ab446f01/go.mod h1:wjDF8z83zTeg5eMLml5EBSlAhbF7G8DobyI1YsMuyzw= github.com/gophercloud/utils v0.0.0-20190313033024-0bcc8e728cb5/go.mod 
h1:SZ9FTKibIotDtCrxAU/evccoyu1yhKST6hgBvwTB5Eg= github.com/gophercloud/utils v0.0.0-20200423144003-7c72efc7435d/go.mod h1:ehWUbLQJPqS0Ep+CxeD559hsm9pthPXadJNKwZkp43w= github.com/gophercloud/utils v0.0.0-20200508015959-b0167b94122c/go.mod h1:ehWUbLQJPqS0Ep+CxeD559hsm9pthPXadJNKwZkp43w= -github.com/gophercloud/utils v0.0.0-20210216074907-f6de111f2eae h1:Hi3IgB9RQDE15Kfovd8MTZrcana+UlQqNbOif8dLpA0= -github.com/gophercloud/utils v0.0.0-20210216074907-f6de111f2eae/go.mod h1:wx8HMD8oQD0Ryhz6+6ykq75PJ79iPyEqYHfwZ4l7OsA= +github.com/gophercloud/utils v0.0.0-20210323225332-7b186010c04f h1:+SO5iEqu9QjNWL9TfAmOE5u0Uizv1T3jpBuMJfMOVJ0= +github.com/gophercloud/utils v0.0.0-20210323225332-7b186010c04f/go.mod h1:wx8HMD8oQD0Ryhz6+6ykq75PJ79iPyEqYHfwZ4l7OsA= github.com/gopherjs/gopherjs v0.0.0-20180628210949-0892b62f0d9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de h1:F7WD09S8QB4LrkEpka0dFPLSotH11HRpCsLIbIcJ7sU= @@ -1042,7 +1159,10 @@ github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyN github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-azure-helpers v0.4.1/go.mod h1:lu62V//auUow6k0IykxLK2DCNW8qTmpm8KqhYVWattA= github.com/hashicorp/go-azure-helpers v0.10.0/go.mod h1:YuAtHxm2v74s+IjQwUG88dHBJPd5jL+cXr5BGVzSKhE= github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= @@ -1070,6 +1190,8 @@ github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1: github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-plugin v1.0.0/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= github.com/hashicorp/go-plugin v1.0.1-0.20190610192547-a1bc61569a26/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= @@ -1222,14 +1344,18 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod 
h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= github.com/influxdata/influxdb v1.7.7/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8= +github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= github.com/jackc/pgx v3.3.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= @@ -1281,7 +1407,6 @@ github.com/jsonnet-bundler/jsonnet-bundler v0.3.1/go.mod h1:/by7P/OoohkI3q4CgSFq github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jteeuwen/go-bindata v3.0.8-0.20151023091102-a0ff2567cfb7+incompatible/go.mod h1:JVvhzYOiGBnFSYRyV00iY8q7/0PThjIYav1p9h5dmKs= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= @@ -1308,9 +1433,11 @@ github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0 github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.10.8/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg= -github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.12.1 h1:/+xsCsk06wE38cyiqOR/o7U2fSftcH72xD+BQXmja/g= +github.com/klauspost/compress v1.12.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= 
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= @@ -1333,7 +1460,6 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kshvakov/clickhouse v1.3.5/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLgx3MUnGwJqpE= -github.com/kubernetes-sigs/kube-storage-version-migrator v0.0.0-20191127225502-51849bc15f17/go.mod h1:enH0BVV+4+DAgWdwSlMefG8bBzTfVMTr1lApzdLZ/cc= github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= @@ -1381,8 +1507,6 @@ github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a/go.mod h1:pHhQN github.com/lusis/go-artifactory v0.0.0-20160115162124-7e4ce345df82/go.mod h1:y54tfGmO3NKssKveTEFFzH8C/akrSOy/iW9qEAUDV84= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magefile/mage v1.10.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= -github.com/magefile/mage v1.11.0 h1:C/55Ywp9BpgVVclD3lRnSYCwXTYxmSppIgLeDYlNuls= -github.com/magefile/mage v1.11.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.7.6/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= @@ -1393,6 +1517,7 @@ github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/manifoldco/promptui v0.8.0/go.mod h1:n4zTdgP0vr0S3w7/O/g98U+e0gwLScEXGwov2nIKuGQ= @@ -1434,6 +1559,9 @@ github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg= github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= +github.com/mattn/go-runewidth v0.0.12 h1:Y41i/hVW3Pgwr8gV+J23B9YEY0zxjptBuCWEaxmAOow= +github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= +github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.4/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.10/go.mod 
h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= @@ -1488,6 +1616,8 @@ github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= @@ -1509,8 +1639,13 @@ github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hx github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o= github.com/moby/sys/mountinfo v0.4.0 h1:1KInV3Huv18akCu58V7lzNlt+jFmqlu1EaErnEHE/VM= github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM= +github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd h1:aY7OQNf2XqY/JQ6qREWamhI/81os/agb2BAGpcx5yWI= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1530,6 +1665,7 @@ github.com/mozillazg/go-cos v0.13.0/go.mod h1:Zp6DvvXn0RUOXGJ2chmWt2bLEqRAnJnS3D github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= github.com/mrunalp/fileutils v0.0.0-20200520151820-abd8a0e76976/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/mtrmac/gpgme v0.1.2 h1:dNOmvYmsrakgW7LcgiprD0yfRuQQe8/C8F6Z+zogO3s= github.com/mtrmac/gpgme v0.1.2/go.mod h1:GYYHnGSuS7HK3zVS2n3y73y0okK/BeKzwnn5jgiVFNI= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= @@ -1572,6 +1708,7 @@ github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= 
github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= +github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -1582,8 +1719,9 @@ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.15.0 h1:1V1NfVQR87RtWAgp1lv9JZJ5Jap+XFGKPi00andXGi4= -github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= +github.com/onsi/ginkgo v1.16.1 h1:foqVmeWDD6yYpK+Yz3fHyNIxFYNxswxqNFjSKe+vI54= +github.com/onsi/ginkgo v1.16.1/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -1593,12 +1731,13 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ= -github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= +github.com/onsi/gomega v1.11.0 h1:+CqWgvj0OZycCaqclBD1pxKHAU+tOkHmQIWvDHq2aug= +github.com/onsi/gomega v1.11.0/go.mod h1:azGKhqFUon9Vuj0YmTfLSmx0FUwqXYSTl5re8lQLTUg= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= @@ -1608,18 +1747,26 @@ github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.0.0-20191031171055-b133feaeeb2e/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc 
v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc90/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc90.0.20200616040943-82d2fa4eb069/go.mod h1:3Sm6Dt7OT8z88EbdQqqcRN2oCT54jbi72tT/HqgflT8= -github.com/opencontainers/runc v1.0.0-rc91/go.mod h1:3Sm6Dt7OT8z88EbdQqqcRN2oCT54jbi72tT/HqgflT8= github.com/opencontainers/runc v1.0.0-rc91.0.20200707015106-819fcc687efb h1:LcPVE5u4oaqw8ffPbJew0lUxZC7faM5t52PgU4px1xY= github.com/opencontainers/runc v1.0.0-rc91.0.20200707015106-819fcc687efb/go.mod h1:ZuXhqlr4EiRYgDrBDNfSbE4+n9JX4+V107NwAmF7sZA= +github.com/opencontainers/runc v1.0.0-rc93 h1:x2UMpOOVf3kQ8arv/EsDGwim8PTNqzL1/EYDr/+scOM= +github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2 h1:9mv9SC7GWmRWE0J/+oD8w3GsN2KYGKtg6uwLN7hfP5E= github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d h1:pNa8metDkwZjb9g4T8s+krQ+HRgZAkqnXml+wNir/+s= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= github.com/opencontainers/selinux v1.5.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/openshift-metal3/terraform-provider-ironic v0.2.3/go.mod h1:ux2W6gsCIYsY/fX5N0V0ZgwFEBNN7P8g6RlH6ACi97k= github.com/openshift/api v0.0.0-20210127195806-54e5e88cf848 h1:bGRCsb8QqwjWlwSv2AamE/T6AoN1FMm5Lowo74I3HFg= @@ -1635,34 +1782,30 @@ github.com/openshift/cloud-credential-operator v0.0.0-20201202215507-371eb009d9a github.com/openshift/cloud-credential-operator v0.0.0-20201202215507-371eb009d9a1/go.mod h1:DEi4qkr2AMIZxZILwXJf+zkjtoU3tKNJQpNmHFIV98I= github.com/openshift/cluster-api v0.0.0-20191129101638-b09907ac6668 h1:IDZyg/Kye98ptqpc9j9rzPjZJlijjEDe8g7TZ67CmLU= github.com/openshift/cluster-api v0.0.0-20191129101638-b09907ac6668/go.mod h1:T18COkr6nLh9RyZKPMP7YjnwBME7RX8P2ar1SQbBltM= -github.com/openshift/cluster-api-provider-aws v0.2.1-0.20201002185235-b1a6ba661ed8 h1:X+tVma3QPyPuPD8gOcVs3buUhCbykdXj54uGbyj1Bvc= -github.com/openshift/cluster-api-provider-aws v0.2.1-0.20201002185235-b1a6ba661ed8/go.mod h1:rDwmh/vpz6mUU/l9QLWeaoGpUeC+b3yyI34xnp3tIf8= 
-github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20201119004617-db9109863f2f h1:h4Bq3YdZJ9lr6cDZP5HPfl+bwJPzY9Iah7KNSkbALEs= -github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20201119004617-db9109863f2f/go.mod h1:oOG/TNSBse4brosfLCH/G2Q/42ye+DZQq8VslA5SxOs= -github.com/openshift/cluster-api-provider-baremetal v0.0.0-20201105032354-fcd9e769a45c h1:21XJFYyE87XGmmQ06m4DRS1u/n5r5H36z2qF5eY914E= -github.com/openshift/cluster-api-provider-baremetal v0.0.0-20201105032354-fcd9e769a45c/go.mod h1:t7bIW61Oa74MO2VzwdSSQIE4/dFt6kNz16qZYINRMSw= -github.com/openshift/cluster-api-provider-baremetal v0.0.0-20210126223742-4e254d08e0ad h1:zzNSQJCXTlI3Q/yipL6w7x1th2fgfqPgD4lQPfUsvK4= -github.com/openshift/cluster-api-provider-baremetal v0.0.0-20210126223742-4e254d08e0ad/go.mod h1:3q1x2OTFeeCsoIqFPvngVBeoLE0pP0GzS2FQ3u/dbtk= -github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201002153134-a0fc9aa4ce81 h1:vzwKneicukAJO3DtlH2a1DXQBCjaTd8dWLYu/2mij04= -github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201002153134-a0fc9aa4ce81/go.mod h1:38+VvkKbKyAV3FJW9/QvnOm6hw8YR4LLPqgxcD/E2o8= -github.com/openshift/cluster-api-provider-libvirt v0.2.1-0.20200919090150-1ca52adab176 h1:P8Gig9tCK1/vggkqWQi4wflHwgPLphCdmeySdiILLU0= -github.com/openshift/cluster-api-provider-libvirt v0.2.1-0.20200919090150-1ca52adab176/go.mod h1:IlW/3Vm/qYaqQLUQSP7MqdJsq/etTP8KEyVuk7rSLoo= -github.com/openshift/cluster-api-provider-openstack v0.0.0-20201002114634-3622a0ce6b56 h1:Dv6vDwkXHc5Q3YAGbqf33Ob6nmaIDZ/MeNQ0QjcEYVc= -github.com/openshift/cluster-api-provider-openstack v0.0.0-20201002114634-3622a0ce6b56/go.mod h1:G7EmtWOHWzPKf9uQxw4DNCuZVWbYb/cgdpIK7gNzo+4= +github.com/openshift/cluster-api-provider-aws v0.2.1-0.20210325053523-878b3a16e964 h1:OKHrgBo4JlfrEUaVf9aBeIqjZY3GTMOs3rK5ghi2C0Y= +github.com/openshift/cluster-api-provider-aws v0.2.1-0.20210325053523-878b3a16e964/go.mod h1:Mgg11Qis5VFw8+mFCrb2lTlXjYjiAyhLuSiZ18cZyPk= +github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20210324211250-7a14c7f1a41a h1:vfKQUL9SWXsy1IIhsLddRUM/4UZozuKk5AQxUJrCB+g= +github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20210324211250-7a14c7f1a41a/go.mod h1:c4u3i7QxdC5oaIelkEgbN1hu5sVJKlhbk5iEI0GFFl4= +github.com/openshift/cluster-api-provider-baremetal v0.0.0-20210304234017-16b67b78b538 h1:kdMfaP4CHWzLuHJfzox4yaSbJy9uV9LdDD1e/f9LE20= +github.com/openshift/cluster-api-provider-baremetal v0.0.0-20210304234017-16b67b78b538/go.mod h1:7vGGlN2bJaNMbl6B9lOAuKCO+haAciQ19OwFwzW7mFo= +github.com/openshift/cluster-api-provider-baremetal v0.0.0-20210322184723-0e61e5b3c9a5 h1:X5vttUTv6lmEVotcDz083hBNkF/4gFHSgoJZvinKvyI= +github.com/openshift/cluster-api-provider-baremetal v0.0.0-20210322184723-0e61e5b3c9a5/go.mod h1:envfD/5AmIUauc3EVWtBcOvBGZj2++xUXCxdZ3W+XUI= +github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20210324200451-9b15b1192f37 h1:FbGulMAVGuS/wUwkbdA2lOUql0oYsCH7xp727yAjmEE= +github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20210324200451-9b15b1192f37/go.mod h1:ldScr111Nnn0qM1IdpCD3c9f5/DHBxa+yhKW5r/boNU= +github.com/openshift/cluster-api-provider-libvirt v0.2.1-0.20210326022320-b3e2718f198e h1:Z0q3GWEWC7AZWgsOGejHv1B4O/3bgZ7Lq+RfQeUkl+A= +github.com/openshift/cluster-api-provider-libvirt v0.2.1-0.20210326022320-b3e2718f198e/go.mod h1:MHjk1spD7aLlK5rb0rc39sJArzsPTGu6ub/k+KMDum8= +github.com/openshift/cluster-api-provider-openstack v0.0.0-20210318183513-48b73415908a h1:x+ztzxJZS8rzjCC0Hz/l1QpqRRnWfW6K8mlFm/yjoQk= +github.com/openshift/cluster-api-provider-openstack 
v0.0.0-20210318183513-48b73415908a/go.mod h1:0tDSG2/cQE7pm9gPMX8lASzsfnjg3AyqWmeAmyfldAw= github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20210210114935-91f12f3f7dee h1:gAY5/4sRA8VvW2HSkSK/9QR01A8otIP3O3BXxyHlFGk= github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20210210114935-91f12f3f7dee/go.mod h1:qxjxa8wfM7mM8aSkiqsHXNy7tt6JEde5fuzDbfVFPSg= github.com/openshift/console-operator v0.0.0-20210116095614-7fd78a283616 h1:HxEPU7i6gefzBHzPX7DtpBQyOKAuAU/ytl3E1nQmgaQ= github.com/openshift/console-operator v0.0.0-20210116095614-7fd78a283616/go.mod h1:PSYON6wf2apMwuUEDiak89yd039C26rHEa2pzydx6hQ= -github.com/openshift/library-go v0.0.0-20191003152030-97c62d8a2901/go.mod h1:NBttNjZpWwup/nthuLbPAPSYC8Qyo+BBK5bCtFoyYjo= -github.com/openshift/library-go v0.0.0-20200324092245-db2a8546af81/go.mod h1:Qc5duoXHzAKyUfA0REIlG/rdfWzknOpp9SiDiyg5Y7A= -github.com/openshift/library-go v0.0.0-20200831114015-2ab0c61c15de/go.mod h1:6vwp+YhYOIlj8MpkQKkebTTSn2TuYyvgiAFQ206jIEQ= -github.com/openshift/library-go v0.0.0-20200907120738-ea57b121ba1a/go.mod h1:6vwp+YhYOIlj8MpkQKkebTTSn2TuYyvgiAFQ206jIEQ= -github.com/openshift/library-go v0.0.0-20200909173121-1d055d971916 h1:H9XwZlu78Pn87KNBGGjZY0L2fLOoPnzEajigVur5ZOY= -github.com/openshift/library-go v0.0.0-20200909173121-1d055d971916/go.mod h1:6vwp+YhYOIlj8MpkQKkebTTSn2TuYyvgiAFQ206jIEQ= -github.com/openshift/machine-api-operator v0.2.1-0.20210212025836-cb508cd8777d h1:HPsOufOinE9dt/EtqY20gigk+es+y53GtcQPj4ItRl0= -github.com/openshift/machine-api-operator v0.2.1-0.20210212025836-cb508cd8777d/go.mod h1:UZK7t/t3dO73RqC9MsXM/BAIMWRsNaE9NEKUXBBY8y8= -github.com/openshift/machine-config-operator v0.0.1-0.20210211205336-14a2b82d9f4c h1:TKUUPRVwbaYrDWk23OwelrUgylNOOxhu2dfYq18pVrU= -github.com/openshift/machine-config-operator v0.0.1-0.20210211205336-14a2b82d9f4c/go.mod h1:R6yh4vExBv/SWSHjE2rpECjJOovs4NIAAgCK5RXmna4= +github.com/openshift/library-go v0.0.0-20210204141222-0e7715cd7725 h1:9JQ5A3EnizowEXcfK2K4+WbTjrs+mAM5L10dkdhYuZs= +github.com/openshift/library-go v0.0.0-20210204141222-0e7715cd7725/go.mod h1:KNfLGf4dIRJ+QB2aGy67AOy1k+DV783cMCuJf0d4Zik= +github.com/openshift/machine-api-operator v0.2.1-0.20210304062120-b5fb76b27015 h1:aKUQEIeoqnLg8FYVdqAPYk1yGj5NOE7kJ9NbzIugs3s= +github.com/openshift/machine-api-operator v0.2.1-0.20210304062120-b5fb76b27015/go.mod h1:UZK7t/t3dO73RqC9MsXM/BAIMWRsNaE9NEKUXBBY8y8= +github.com/openshift/machine-config-operator v0.0.1-0.20210331171350-d5dc2b519aed h1:pdbX7BVbQzGN4ocd6umgfnCzkGw71IlA9hjllIxTXJ8= +github.com/openshift/machine-config-operator v0.0.1-0.20210331171350-d5dc2b519aed/go.mod h1:R6yh4vExBv/SWSHjE2rpECjJOovs4NIAAgCK5RXmna4= github.com/openshift/origin v0.0.0-20160503220234-8f127d736703/go.mod h1:0Rox5r9C8aQn6j1oAOQ0c1uC86mYbUFObzjBRvUKHII= github.com/openshift/prom-label-proxy v0.1.1-0.20191016113035-b8153a7f39f1/go.mod h1:p5MuxzsYP1JPsNGwtjtcgRHHlGziCJJfztff91nNixw= github.com/openshift/runtime-utils v0.0.0-20200415173359-c45d4ff3f912/go.mod h1:0OXNy7VoqFexkxKqyQbHJLPwn1MFp1/CxRJAgKHM+/o= @@ -1744,8 +1887,8 @@ github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DK github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/pquerna/cachecontrol v0.0.0-20201205024021-ac21108117ac 
h1:jWKYCNlX4J5s8M0nHYkh7Y7c9gRVDEb3mq51j5J0F5M= -github.com/pquerna/cachecontrol v0.0.0-20201205024021-ac21108117ac/go.mod h1:hoLfEwdY11HjRfKFH6KqnPsfxlo3BP6bJehpDv8t6sQ= +github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc= +github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= github.com/pquerna/otp v1.1.0/go.mod h1:Zad1CMQfSQZI5KLpahDiSUX4tMMREnXw98IvL1nhgMk= @@ -1766,8 +1909,8 @@ github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNk github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.9.0 h1:Rrch9mh17XcxvEu9D9DEpb4isxjGBtcevQjKvxPRQIU= -github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= +github.com/prometheus/client_golang v1.10.0 h1:/o0BDeWzLWXNZ+4q5gXltUvaMpJqckTa+jTNoB+z4cg= +github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -1790,8 +1933,9 @@ github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM= -github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.20.0 h1:pfeDeUdQcIxOMutNjCejsEFp7qeP+/iltHSSmLpE+hU= +github.com/prometheus/common v0.20.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1808,7 +1952,6 @@ github.com/prometheus/procfs v0.0.6/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0 
h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= @@ -1847,6 +1990,8 @@ github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvf github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= +github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryancurrah/gomodguard v1.0.4/go.mod h1:9T/Cfuxs5StfsocWr4WzDL36HqnX0fVb9d5fSEaLhoE= github.com/ryancurrah/gomodguard v1.1.0 h1:DWbye9KyMgytn8uYpuHkwf0RHqAYO6Ay/D0TbCpPtVU= @@ -1857,6 +2002,7 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= @@ -1889,14 +2035,15 @@ github.com/shurcooL/vfsgen v0.0.0-20180825020608-02ddb050ef6b/go.mod h1:TrYk7fJV github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.7.1 h1:rsizeFmZP+GYwyb4V6t6qpG7ZNWzA2bvgW/yC2xHCcg= -github.com/sirupsen/logrus v1.7.1/go.mod h1:4GuYW9TZmE769R5STWrRakJc4UqQ3+QQ95fyz7ENv1A= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180725160413-e900ae048470/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= 
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= @@ -1933,6 +2080,8 @@ github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHN github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= @@ -1962,10 +2111,12 @@ github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc/go.mod h1:ZrLn+e0 github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -1983,6 +2134,7 @@ github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tdakkota/asciicheck v0.0.0-20200416190851-d7f85be797a2 h1:Xr9gkxfOP0KQWXKNqmwe8vEeSUiUj4Rlee9CMVX2ZUQ= github.com/tdakkota/asciicheck v0.0.0-20200416190851-d7f85be797a2/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= @@ -2029,7 +2181,6 @@ github.com/ugorji/go/codec v1.2.2/go.mod h1:OM8g7OAy52uYl3Yk+RE/3AS1nXFn1Wh4PPLt github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/ulikunitz/xz v0.5.7/go.mod 
h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ultraware/funlen v0.0.1/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= @@ -2054,14 +2205,16 @@ github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g= github.com/vbauerster/mpb/v5 v5.2.2/go.mod h1:W5Fvgw4dm3/0NhqzV8j6EacfuTe5SvnzBRwiXxDR9ww= -github.com/vbauerster/mpb/v5 v5.4.0 h1:n8JPunifvQvh6P1D1HAl2Ur9YcmKT1tpoUuiea5mlmg= -github.com/vbauerster/mpb/v5 v5.4.0/go.mod h1:fi4wVo7BVQ22QcvFObm+VwliQXlV1eBT8JDaKXR4JGI= +github.com/vbauerster/mpb/v6 v6.0.3 h1:j+twHHhSUe8aXWaT/27E98G5cSBeqEuJSVCMjmLg0PI= +github.com/vbauerster/mpb/v6 v6.0.3/go.mod h1:5luBx4rDLWxpA4t6I5sdeeQuZhqDxc+wr5Nqf35+tnM= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vincent-petithory/dataurl v0.0.0-20160330182126-9a301d65acbb/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U= github.com/vincent-petithory/dataurl v0.0.0-20191104211930-d1553a71de50 h1:uxE3GYdXIOfhMv3unJKETJEhw78gvzuQqRX/rVirc2A= github.com/vincent-petithory/dataurl v0.0.0-20191104211930-d1553a71de50/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U= +github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200520041808-52d707b772fe/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= @@ -2070,10 +2223,11 @@ github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6Ac github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/vmware/govmomi v0.22.1/go.mod h1:Y+Wq4lst78L85Ge/F8+ORXIWiKYqaro1vhAulACy9Lc= github.com/vmware/govmomi v0.22.2/go.mod h1:Y+Wq4lst78L85Ge/F8+ORXIWiKYqaro1vhAulACy9Lc= -github.com/vmware/govmomi v0.24.0 h1:G7YFF6unMTG3OY25Dh278fsomVTKs46m2ENlEFSbmbs= -github.com/vmware/govmomi v0.24.0/go.mod h1:Y+Wq4lst78L85Ge/F8+ORXIWiKYqaro1vhAulACy9Lc= +github.com/vmware/govmomi v0.24.1 h1:ecVvrxF28/5g738gLTiYgc62fpGfIPRKheQ1Dj1p35w= +github.com/vmware/govmomi v0.24.1/go.mod h1:Y+Wq4lst78L85Ge/F8+ORXIWiKYqaro1vhAulACy9Lc= github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= github.com/vmware/vmw-ovflib v0.0.0-20170608004843-1f217b9dc714/go.mod h1:jiPk45kn7klhByRvUq5i2vo1RtHKBHj+iWGFpxbXuuI= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= 
github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= @@ -2146,8 +2300,8 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.22.6 h1:BdkrbWrzDlV9dnbzoP7sfN+dHheJ4J9JOaYxcUDL+ok= -go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -2174,6 +2328,7 @@ golang.org/x/build v0.0.0-20190314133821-5284462c4bec/go.mod h1:atTaCNAy0f16Ah5a golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -2209,12 +2364,13 @@ golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2258,6 +2414,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170915142106-8351a756f30f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2266,6 +2424,7 @@ golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2314,7 +2473,6 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200519113804-d87ec0cfa476/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -2326,8 +2484,13 @@ golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777 h1:003p0dJM77cxMSyCPFphvZf/Y5/NXf5fzg6ufd1/Oew= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net 
v0.0.0-20210414194228-064579744ee0 h1:iqW3Mjl/6IP9cHJC/wdiIu3lyBDMUfDElRMyFlqbtiQ= +golang.org/x/net v0.0.0-20210414194228-064579744ee0/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2342,9 +2505,12 @@ golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210113205817-d3ed898aa8a3/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210216194517-16ff1888fd2e h1:xxTKAjlluPXFVQnUNoBO7OvmNNE/RpmyUeLVFSYiQQ0= -golang.org/x/oauth2 v0.0.0-20210216194517-16ff1888fd2e/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78 h1:rPRtHfUb0UKZeZ6GH4K4Nt4YRbE9V1u+QZX5upZXqJQ= +golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2357,6 +2523,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20171026204733-164713f0dfce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2390,11 +2558,13 @@ golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190530182044-ad28b68e88f1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2402,6 +2572,7 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190730183949-1393eb018365/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2423,6 +2594,7 @@ golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2451,27 +2623,38 @@ golang.org/x/sys v0.0.0-20200610111108-226ff32320da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201116194326-cc9327a14d48/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210217105451-b926d437f341 h1:2/QtM1mL37YmcsT8HaDNHDgTqqFVw+zr8UzMiBVLzYU= -golang.org/x/sys v0.0.0-20210217105451-b926d437f341/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210415045647-66c3f260301c h1:6L+uOeS3OQt/f4eFHXZcTxeZrGCuz+CLElgEBjbcTA4= +golang.org/x/sys v0.0.0-20210415045647-66c3f260301c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M= -golang.org/x/term 
v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210406210042-72f3dc4e9b72 h1:VqE9gduFZ4dbR7XoL77lHFp0/DyDUBKSXK7CMFkVcV0= +golang.org/x/term v0.0.0-20210406210042-72f3dc4e9b72/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915090833-1cbadb444a80/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2483,13 +2666,15 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 h1:Hir2P/De0WpUhtrKGGjvSb2YxUgyZ7EFOSLIcSSpiwE= -golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20170915040203-e531a2a1c15f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2583,7 +2768,6 @@ golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200519175826-7521f6f42533/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200601175630-2caf76543d99/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200602230032-c00d67ef29d0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200606014950-c42cb6316fb6/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -2665,6 +2849,10 @@ google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0 h1:uWrpz12dpVPn7cojP82mk02XDgTJLDPc2KbVTxrWb4A= google.golang.org/api v0.40.0/go.mod 
h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0 h1:URs6qR1lAxDsqWITsQXI4ZkGiYJ5dHtRNiCpfs2OeKA= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2690,6 +2878,7 @@ google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190513181449-d00d292a067c/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= @@ -2735,8 +2924,13 @@ google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210212180131-e7f2df4ecc2d h1:Edhcm0CKDPLQIecHCp5Iz57Lo7MfT6zUFBAlocmOjcY= -google.golang.org/genproto v0.0.0-20210212180131-e7f2df4ecc2d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210414175830-92282443c685 h1:haf+Lo6ErjmEGRdEPFhwbQAiOoq7s+7xKIMfxOzmoSs= +google.golang.org/genproto v0.0.0-20210414175830-92282443c685/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= @@ -2767,6 +2961,10 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= 
google.golang.org/grpc v1.35.0 h1:TwIQcH3es+MojMVojxxfQ3l3OF2KzlRxML2xZq0kRo8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc/cmd/protoc-gen-go-grpc v0.0.0-20200709232328-d8193ee9cc3e/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -2779,6 +2977,9 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/AlecAivazis/survey.v1 v1.8.9-0.20200217094205-6773bdf39b7f h1:AQkMzsSzHWrgZWqGRpuRaRPDmyNibcXlpGcnQJ7HxZw= gopkg.in/AlecAivazis/survey.v1 v1.8.9-0.20200217094205-6773bdf39b7f/go.mod h1:CaHjv79TCgAvXMSFJSVgonHXYWxnhzI3eoHtnX5UgUo= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= @@ -2845,6 +3046,8 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= helm.sh/helm/v3 v3.2.4/go.mod h1:ZaXz/vzktgwjyGGFbUWtIQkscfE7WYoRGP2szqAFHR0= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2895,8 +3098,8 @@ k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.5.0 h1:8mOnjf1RmUPW6KRqQCfYSZq/K20Unmp3IhuZUhxl8KI= -k8s.io/klog/v2 v2.5.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= +k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-aggregator v0.19.0-rc.2/go.mod h1:6PWWQhJWKECXUitSZ8Mo6LD9gIu4sPZaPpOEU67NkmM= k8s.io/kube-controller-manager v0.19.0-rc.2/go.mod h1:WlVUi/rJKZUMBwOFUts9fMFVaPZCxxtoP2pVyyD+UnM= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ= 
@@ -2922,8 +3125,9 @@ k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/ k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20200720150651-0bdb4ca86cbc/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 h1:0T5IaWHO3sJTEmCP6mUlBvMukxPKUQWqiI/YuiBNMiQ= -k8s.io/utils v0.0.0-20210111153108-fddb29f9d009/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210305010621-2afb4311ab10 h1:u5rPykqiCpL+LBfjRkXvnK71gOgIdmq3eHUEkPrbeTI= +k8s.io/utils v0.0.0-20210305010621-2afb4311ab10/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= layeh.com/radius v0.0.0-20190322222518-890bc1058917/go.mod h1:fywZKyu//X7iRzaxLgPWsvc0L26IUpVvE/aeIL2JtIQ= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= @@ -2951,6 +3155,7 @@ sigs.k8s.io/controller-runtime v0.6.4 h1:4013CKsBs5bEqo+LevzDett+LLxag/FjQWG94nV sigs.k8s.io/controller-runtime v0.6.4/go.mod h1:WlZNXcM0++oyaQt4B7C2lEE5JYRs8vJUzRP4N4JpdAY= sigs.k8s.io/controller-tools v0.5.0 h1:3u2RCwOlp0cjCALAigpOcbAf50pE+kHSdueUosrC/AE= sigs.k8s.io/controller-tools v0.5.0/go.mod h1:JTsstrMpxs+9BUj6eGuAaEb6SDSPTeVtUyp0jmnAM/I= +sigs.k8s.io/kube-storage-version-migrator v0.0.3/go.mod h1:mXfSLkx9xbJHQsgNDDUZK/iQTs2tMbx/hsJlWe6Fthw= sigs.k8s.io/kubebuilder v1.0.9-0.20200618125005-36aa113dbe99/go.mod h1:FGPx0hvP73+bapzWoy5ePuhAJYgJjrFbPxgvWyortM0= sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0= sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= diff --git a/pkg/util/azureclient/apiversions.go b/pkg/util/azureclient/apiversions.go index 6a82ccc87ad..aea1e49c5e1 100644 --- a/pkg/util/azureclient/apiversions.go +++ b/pkg/util/azureclient/apiversions.go @@ -9,21 +9,22 @@ import ( // keys must be lower case var apiVersions = map[string]string{ - "microsoft.authorization": "2018-09-01-preview", - "microsoft.authorization/denyassignments": "2018-07-01-preview", - "microsoft.authorization/roledefinitions": "2018-01-01-preview", - "microsoft.compute": "2020-06-01", - "microsoft.compute/disks": "2019-03-01", // 2020-06-01 doesn't exist for Microsoft.Compute/disks; needed in delete path - "microsoft.compute/snapshots": "2020-05-01", // 2020-06-01 doesn't exist for Microsoft.Compute/snapshots; needed in list resources see https://github.com/Azure/ARO-RP/issues/1313 - "microsoft.containerregistry": "2020-11-01-preview", - "microsoft.documentdb": "2019-08-01", - "microsoft.insights": "2018-03-01", - "microsoft.keyvault": "2016-10-01", - "microsoft.managedidentity": "2018-11-30", - "microsoft.network": "2019-07-01", - "microsoft.network/dnszones": "2018-05-01", - "microsoft.network/privatednszones": "2018-09-01", - "microsoft.storage": "2019-04-01", + "microsoft.authorization": "2018-09-01-preview", + "microsoft.authorization/denyassignments": "2018-07-01-preview", + "microsoft.authorization/roledefinitions": "2018-01-01-preview", + "microsoft.compute": "2020-06-01", + "microsoft.compute/disks": "2019-03-01", // 2020-06-01 doesn't exist for Microsoft.Compute/disks; needed in delete path + "microsoft.compute/snapshots": 
"2020-05-01", // 2020-06-01 doesn't exist for Microsoft.Compute/snapshots; needed in list resources see https://github.com/Azure/ARO-RP/issues/1313 + "microsoft.containerregistry": "2020-11-01-preview", + "microsoft.documentdb": "2019-08-01", + "microsoft.insights": "2018-03-01", + "microsoft.keyvault": "2016-10-01", + "microsoft.managedidentity": "2018-11-30", + "microsoft.network": "2019-07-01", + "microsoft.network/dnszones": "2018-05-01", + "microsoft.network/privatednszones": "2018-09-01", + "microsoft.storage": "2019-04-01", + "microsoft.operationalinsights/workspaces": "2015-03-20", // quickfix, needed in list resources see https://github.com/Azure/ARO-RP/issues/1313 } // APIVersion gets the APIVersion from a full resource type diff --git a/pkg/util/mocks/adminactions/adminactions.go b/pkg/util/mocks/adminactions/adminactions.go index 3bb3a628bec..85553191df6 100644 --- a/pkg/util/mocks/adminactions/adminactions.go +++ b/pkg/util/mocks/adminactions/adminactions.go @@ -14,30 +14,30 @@ import ( unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) -// MockKubeActions is a mock of KubeActions interface +// MockKubeActions is a mock of KubeActions interface. type MockKubeActions struct { ctrl *gomock.Controller recorder *MockKubeActionsMockRecorder } -// MockKubeActionsMockRecorder is the mock recorder for MockKubeActions +// MockKubeActionsMockRecorder is the mock recorder for MockKubeActions. type MockKubeActionsMockRecorder struct { mock *MockKubeActions } -// NewMockKubeActions creates a new mock instance +// NewMockKubeActions creates a new mock instance. func NewMockKubeActions(ctrl *gomock.Controller) *MockKubeActions { mock := &MockKubeActions{ctrl: ctrl} mock.recorder = &MockKubeActionsMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockKubeActions) EXPECT() *MockKubeActionsMockRecorder { return m.recorder } -// KubeCreateOrUpdate mocks base method +// KubeCreateOrUpdate mocks base method. func (m *MockKubeActions) KubeCreateOrUpdate(arg0 context.Context, arg1 *unstructured.Unstructured) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "KubeCreateOrUpdate", arg0, arg1) @@ -45,13 +45,13 @@ func (m *MockKubeActions) KubeCreateOrUpdate(arg0 context.Context, arg1 *unstruc return ret0 } -// KubeCreateOrUpdate indicates an expected call of KubeCreateOrUpdate +// KubeCreateOrUpdate indicates an expected call of KubeCreateOrUpdate. func (mr *MockKubeActionsMockRecorder) KubeCreateOrUpdate(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KubeCreateOrUpdate", reflect.TypeOf((*MockKubeActions)(nil).KubeCreateOrUpdate), arg0, arg1) } -// KubeDelete mocks base method +// KubeDelete mocks base method. func (m *MockKubeActions) KubeDelete(arg0 context.Context, arg1, arg2, arg3 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "KubeDelete", arg0, arg1, arg2, arg3) @@ -59,13 +59,13 @@ func (m *MockKubeActions) KubeDelete(arg0 context.Context, arg1, arg2, arg3 stri return ret0 } -// KubeDelete indicates an expected call of KubeDelete +// KubeDelete indicates an expected call of KubeDelete. 
func (mr *MockKubeActionsMockRecorder) KubeDelete(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KubeDelete", reflect.TypeOf((*MockKubeActions)(nil).KubeDelete), arg0, arg1, arg2, arg3) } -// KubeGet mocks base method +// KubeGet mocks base method. func (m *MockKubeActions) KubeGet(arg0 context.Context, arg1, arg2, arg3 string) ([]byte, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "KubeGet", arg0, arg1, arg2, arg3) @@ -74,13 +74,13 @@ func (m *MockKubeActions) KubeGet(arg0 context.Context, arg1, arg2, arg3 string) return ret0, ret1 } -// KubeGet indicates an expected call of KubeGet +// KubeGet indicates an expected call of KubeGet. func (mr *MockKubeActionsMockRecorder) KubeGet(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KubeGet", reflect.TypeOf((*MockKubeActions)(nil).KubeGet), arg0, arg1, arg2, arg3) } -// KubeList mocks base method +// KubeList mocks base method. func (m *MockKubeActions) KubeList(arg0 context.Context, arg1, arg2 string) ([]byte, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "KubeList", arg0, arg1, arg2) @@ -89,13 +89,13 @@ func (m *MockKubeActions) KubeList(arg0 context.Context, arg1, arg2 string) ([]b return ret0, ret1 } -// KubeList indicates an expected call of KubeList +// KubeList indicates an expected call of KubeList. func (mr *MockKubeActionsMockRecorder) KubeList(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KubeList", reflect.TypeOf((*MockKubeActions)(nil).KubeList), arg0, arg1, arg2) } -// Upgrade mocks base method +// Upgrade mocks base method. func (m *MockKubeActions) Upgrade(arg0 context.Context, arg1 bool) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Upgrade", arg0, arg1) @@ -103,36 +103,36 @@ func (m *MockKubeActions) Upgrade(arg0 context.Context, arg1 bool) error { return ret0 } -// Upgrade indicates an expected call of Upgrade +// Upgrade indicates an expected call of Upgrade. func (mr *MockKubeActionsMockRecorder) Upgrade(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Upgrade", reflect.TypeOf((*MockKubeActions)(nil).Upgrade), arg0, arg1) } -// MockAzureActions is a mock of AzureActions interface +// MockAzureActions is a mock of AzureActions interface. type MockAzureActions struct { ctrl *gomock.Controller recorder *MockAzureActionsMockRecorder } -// MockAzureActionsMockRecorder is the mock recorder for MockAzureActions +// MockAzureActionsMockRecorder is the mock recorder for MockAzureActions. type MockAzureActionsMockRecorder struct { mock *MockAzureActions } -// NewMockAzureActions creates a new mock instance +// NewMockAzureActions creates a new mock instance. func NewMockAzureActions(ctrl *gomock.Controller) *MockAzureActions { mock := &MockAzureActions{ctrl: ctrl} mock.recorder = &MockAzureActionsMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockAzureActions) EXPECT() *MockAzureActionsMockRecorder { return m.recorder } -// ResourcesList mocks base method +// ResourcesList mocks base method. 
func (m *MockAzureActions) ResourcesList(arg0 context.Context) ([]byte, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ResourcesList", arg0) @@ -141,13 +141,13 @@ func (m *MockAzureActions) ResourcesList(arg0 context.Context) ([]byte, error) { return ret0, ret1 } -// ResourcesList indicates an expected call of ResourcesList +// ResourcesList indicates an expected call of ResourcesList. func (mr *MockAzureActionsMockRecorder) ResourcesList(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResourcesList", reflect.TypeOf((*MockAzureActions)(nil).ResourcesList), arg0) } -// VMRedeployAndWait mocks base method +// VMRedeployAndWait mocks base method. func (m *MockAzureActions) VMRedeployAndWait(arg0 context.Context, arg1 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "VMRedeployAndWait", arg0, arg1) @@ -155,13 +155,13 @@ func (m *MockAzureActions) VMRedeployAndWait(arg0 context.Context, arg1 string) return ret0 } -// VMRedeployAndWait indicates an expected call of VMRedeployAndWait +// VMRedeployAndWait indicates an expected call of VMRedeployAndWait. func (mr *MockAzureActionsMockRecorder) VMRedeployAndWait(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VMRedeployAndWait", reflect.TypeOf((*MockAzureActions)(nil).VMRedeployAndWait), arg0, arg1) } -// VMSerialConsole mocks base method +// VMSerialConsole mocks base method. func (m *MockAzureActions) VMSerialConsole(arg0 context.Context, arg1 http.ResponseWriter, arg2 *logrus.Entry, arg3 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "VMSerialConsole", arg0, arg1, arg2, arg3) @@ -169,7 +169,7 @@ func (m *MockAzureActions) VMSerialConsole(arg0 context.Context, arg1 http.Respo return ret0 } -// VMSerialConsole indicates an expected call of VMSerialConsole +// VMSerialConsole indicates an expected call of VMSerialConsole. func (mr *MockAzureActionsMockRecorder) VMSerialConsole(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VMSerialConsole", reflect.TypeOf((*MockAzureActions)(nil).VMSerialConsole), arg0, arg1, arg2, arg3) diff --git a/pkg/util/mocks/azureclient/graphrbac/graphrbac.go b/pkg/util/mocks/azureclient/graphrbac/graphrbac.go index feb19ab7c3a..41a37771831 100644 --- a/pkg/util/mocks/azureclient/graphrbac/graphrbac.go +++ b/pkg/util/mocks/azureclient/graphrbac/graphrbac.go @@ -13,30 +13,30 @@ import ( gomock "github.com/golang/mock/gomock" ) -// MockApplicationsClient is a mock of ApplicationsClient interface +// MockApplicationsClient is a mock of ApplicationsClient interface. type MockApplicationsClient struct { ctrl *gomock.Controller recorder *MockApplicationsClientMockRecorder } -// MockApplicationsClientMockRecorder is the mock recorder for MockApplicationsClient +// MockApplicationsClientMockRecorder is the mock recorder for MockApplicationsClient. type MockApplicationsClientMockRecorder struct { mock *MockApplicationsClient } -// NewMockApplicationsClient creates a new mock instance +// NewMockApplicationsClient creates a new mock instance. func NewMockApplicationsClient(ctrl *gomock.Controller) *MockApplicationsClient { mock := &MockApplicationsClient{ctrl: ctrl} mock.recorder = &MockApplicationsClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. 
func (m *MockApplicationsClient) EXPECT() *MockApplicationsClientMockRecorder { return m.recorder } -// Create mocks base method +// Create mocks base method. func (m *MockApplicationsClient) Create(arg0 context.Context, arg1 graphrbac.ApplicationCreateParameters) (graphrbac.Application, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Create", arg0, arg1) @@ -45,13 +45,13 @@ func (m *MockApplicationsClient) Create(arg0 context.Context, arg1 graphrbac.App return ret0, ret1 } -// Create indicates an expected call of Create +// Create indicates an expected call of Create. func (mr *MockApplicationsClientMockRecorder) Create(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockApplicationsClient)(nil).Create), arg0, arg1) } -// Delete mocks base method +// Delete mocks base method. func (m *MockApplicationsClient) Delete(arg0 context.Context, arg1 string) (autorest.Response, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Delete", arg0, arg1) @@ -60,13 +60,13 @@ func (m *MockApplicationsClient) Delete(arg0 context.Context, arg1 string) (auto return ret0, ret1 } -// Delete indicates an expected call of Delete +// Delete indicates an expected call of Delete. func (mr *MockApplicationsClientMockRecorder) Delete(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockApplicationsClient)(nil).Delete), arg0, arg1) } -// GetServicePrincipalsIDByAppID mocks base method +// GetServicePrincipalsIDByAppID mocks base method. func (m *MockApplicationsClient) GetServicePrincipalsIDByAppID(arg0 context.Context, arg1 string) (graphrbac.ServicePrincipalObjectResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetServicePrincipalsIDByAppID", arg0, arg1) @@ -75,13 +75,13 @@ func (m *MockApplicationsClient) GetServicePrincipalsIDByAppID(arg0 context.Cont return ret0, ret1 } -// GetServicePrincipalsIDByAppID indicates an expected call of GetServicePrincipalsIDByAppID +// GetServicePrincipalsIDByAppID indicates an expected call of GetServicePrincipalsIDByAppID. func (mr *MockApplicationsClientMockRecorder) GetServicePrincipalsIDByAppID(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServicePrincipalsIDByAppID", reflect.TypeOf((*MockApplicationsClient)(nil).GetServicePrincipalsIDByAppID), arg0, arg1) } -// List mocks base method +// List mocks base method. func (m *MockApplicationsClient) List(arg0 context.Context, arg1 string) ([]graphrbac.Application, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "List", arg0, arg1) @@ -90,36 +90,36 @@ func (m *MockApplicationsClient) List(arg0 context.Context, arg1 string) ([]grap return ret0, ret1 } -// List indicates an expected call of List +// List indicates an expected call of List. func (mr *MockApplicationsClientMockRecorder) List(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockApplicationsClient)(nil).List), arg0, arg1) } -// MockServicePrincipalClient is a mock of ServicePrincipalClient interface +// MockServicePrincipalClient is a mock of ServicePrincipalClient interface. 
type MockServicePrincipalClient struct { ctrl *gomock.Controller recorder *MockServicePrincipalClientMockRecorder } -// MockServicePrincipalClientMockRecorder is the mock recorder for MockServicePrincipalClient +// MockServicePrincipalClientMockRecorder is the mock recorder for MockServicePrincipalClient. type MockServicePrincipalClientMockRecorder struct { mock *MockServicePrincipalClient } -// NewMockServicePrincipalClient creates a new mock instance +// NewMockServicePrincipalClient creates a new mock instance. func NewMockServicePrincipalClient(ctrl *gomock.Controller) *MockServicePrincipalClient { mock := &MockServicePrincipalClient{ctrl: ctrl} mock.recorder = &MockServicePrincipalClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockServicePrincipalClient) EXPECT() *MockServicePrincipalClientMockRecorder { return m.recorder } -// Create mocks base method +// Create mocks base method. func (m *MockServicePrincipalClient) Create(arg0 context.Context, arg1 graphrbac.ServicePrincipalCreateParameters) (graphrbac.ServicePrincipal, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Create", arg0, arg1) @@ -128,13 +128,13 @@ func (m *MockServicePrincipalClient) Create(arg0 context.Context, arg1 graphrbac return ret0, ret1 } -// Create indicates an expected call of Create +// Create indicates an expected call of Create. func (mr *MockServicePrincipalClientMockRecorder) Create(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockServicePrincipalClient)(nil).Create), arg0, arg1) } -// List mocks base method +// List mocks base method. func (m *MockServicePrincipalClient) List(arg0 context.Context, arg1 string) ([]graphrbac.ServicePrincipal, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "List", arg0, arg1) @@ -143,7 +143,7 @@ func (m *MockServicePrincipalClient) List(arg0 context.Context, arg1 string) ([] return ret0, ret1 } -// List indicates an expected call of List +// List indicates an expected call of List. func (mr *MockServicePrincipalClientMockRecorder) List(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockServicePrincipalClient)(nil).List), arg0, arg1) diff --git a/pkg/util/mocks/azureclient/keyvault/keyvault.go b/pkg/util/mocks/azureclient/keyvault/keyvault.go index 2045db382f2..232b83e612c 100644 --- a/pkg/util/mocks/azureclient/keyvault/keyvault.go +++ b/pkg/util/mocks/azureclient/keyvault/keyvault.go @@ -12,30 +12,30 @@ import ( gomock "github.com/golang/mock/gomock" ) -// MockBaseClient is a mock of BaseClient interface +// MockBaseClient is a mock of BaseClient interface. type MockBaseClient struct { ctrl *gomock.Controller recorder *MockBaseClientMockRecorder } -// MockBaseClientMockRecorder is the mock recorder for MockBaseClient +// MockBaseClientMockRecorder is the mock recorder for MockBaseClient. type MockBaseClientMockRecorder struct { mock *MockBaseClient } -// NewMockBaseClient creates a new mock instance +// NewMockBaseClient creates a new mock instance. 
func NewMockBaseClient(ctrl *gomock.Controller) *MockBaseClient { mock := &MockBaseClient{ctrl: ctrl} mock.recorder = &MockBaseClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockBaseClient) EXPECT() *MockBaseClientMockRecorder { return m.recorder } -// CreateCertificate mocks base method +// CreateCertificate mocks base method. func (m *MockBaseClient) CreateCertificate(arg0 context.Context, arg1, arg2 string, arg3 keyvault.CertificateCreateParameters) (keyvault.CertificateOperation, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateCertificate", arg0, arg1, arg2, arg3) @@ -44,13 +44,13 @@ func (m *MockBaseClient) CreateCertificate(arg0 context.Context, arg1, arg2 stri return ret0, ret1 } -// CreateCertificate indicates an expected call of CreateCertificate +// CreateCertificate indicates an expected call of CreateCertificate. func (mr *MockBaseClientMockRecorder) CreateCertificate(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateCertificate", reflect.TypeOf((*MockBaseClient)(nil).CreateCertificate), arg0, arg1, arg2, arg3) } -// DeleteCertificate mocks base method +// DeleteCertificate mocks base method. func (m *MockBaseClient) DeleteCertificate(arg0 context.Context, arg1, arg2 string) (keyvault.DeletedCertificateBundle, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeleteCertificate", arg0, arg1, arg2) @@ -59,13 +59,13 @@ func (m *MockBaseClient) DeleteCertificate(arg0 context.Context, arg1, arg2 stri return ret0, ret1 } -// DeleteCertificate indicates an expected call of DeleteCertificate +// DeleteCertificate indicates an expected call of DeleteCertificate. func (mr *MockBaseClientMockRecorder) DeleteCertificate(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCertificate", reflect.TypeOf((*MockBaseClient)(nil).DeleteCertificate), arg0, arg1, arg2) } -// GetCertificateOperation mocks base method +// GetCertificateOperation mocks base method. func (m *MockBaseClient) GetCertificateOperation(arg0 context.Context, arg1, arg2 string) (keyvault.CertificateOperation, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetCertificateOperation", arg0, arg1, arg2) @@ -74,13 +74,13 @@ func (m *MockBaseClient) GetCertificateOperation(arg0 context.Context, arg1, arg return ret0, ret1 } -// GetCertificateOperation indicates an expected call of GetCertificateOperation +// GetCertificateOperation indicates an expected call of GetCertificateOperation. func (mr *MockBaseClientMockRecorder) GetCertificateOperation(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCertificateOperation", reflect.TypeOf((*MockBaseClient)(nil).GetCertificateOperation), arg0, arg1, arg2) } -// GetCertificates mocks base method +// GetCertificates mocks base method. 
func (m *MockBaseClient) GetCertificates(arg0 context.Context, arg1 string, arg2 *int32, arg3 *bool) (keyvault.CertificateListResultPage, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetCertificates", arg0, arg1, arg2, arg3) @@ -89,13 +89,13 @@ func (m *MockBaseClient) GetCertificates(arg0 context.Context, arg1 string, arg2 return ret0, ret1 } -// GetCertificates indicates an expected call of GetCertificates +// GetCertificates indicates an expected call of GetCertificates. func (mr *MockBaseClientMockRecorder) GetCertificates(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCertificates", reflect.TypeOf((*MockBaseClient)(nil).GetCertificates), arg0, arg1, arg2, arg3) } -// GetSecret mocks base method +// GetSecret mocks base method. func (m *MockBaseClient) GetSecret(arg0 context.Context, arg1, arg2, arg3 string) (keyvault.SecretBundle, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetSecret", arg0, arg1, arg2, arg3) @@ -104,13 +104,13 @@ func (m *MockBaseClient) GetSecret(arg0 context.Context, arg1, arg2, arg3 string return ret0, ret1 } -// GetSecret indicates an expected call of GetSecret +// GetSecret indicates an expected call of GetSecret. func (mr *MockBaseClientMockRecorder) GetSecret(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSecret", reflect.TypeOf((*MockBaseClient)(nil).GetSecret), arg0, arg1, arg2, arg3) } -// GetSecrets mocks base method +// GetSecrets mocks base method. func (m *MockBaseClient) GetSecrets(arg0 context.Context, arg1 string, arg2 *int32) ([]keyvault.SecretItem, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetSecrets", arg0, arg1, arg2) @@ -119,13 +119,13 @@ func (m *MockBaseClient) GetSecrets(arg0 context.Context, arg1 string, arg2 *int return ret0, ret1 } -// GetSecrets indicates an expected call of GetSecrets +// GetSecrets indicates an expected call of GetSecrets. func (mr *MockBaseClientMockRecorder) GetSecrets(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSecrets", reflect.TypeOf((*MockBaseClient)(nil).GetSecrets), arg0, arg1, arg2) } -// SetSecret mocks base method +// SetSecret mocks base method. func (m *MockBaseClient) SetSecret(arg0 context.Context, arg1, arg2 string, arg3 keyvault.SecretSetParameters) (keyvault.SecretBundle, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetSecret", arg0, arg1, arg2, arg3) @@ -134,7 +134,7 @@ func (m *MockBaseClient) SetSecret(arg0 context.Context, arg1, arg2 string, arg3 return ret0, ret1 } -// SetSecret indicates an expected call of SetSecret +// SetSecret indicates an expected call of SetSecret. 
func (mr *MockBaseClientMockRecorder) SetSecret(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSecret", reflect.TypeOf((*MockBaseClient)(nil).SetSecret), arg0, arg1, arg2, arg3) diff --git a/pkg/util/mocks/azureclient/mgmt/authorization/authorization.go b/pkg/util/mocks/azureclient/mgmt/authorization/authorization.go index 0fe794991d6..ecab9c84690 100644 --- a/pkg/util/mocks/azureclient/mgmt/authorization/authorization.go +++ b/pkg/util/mocks/azureclient/mgmt/authorization/authorization.go @@ -12,30 +12,30 @@ import ( gomock "github.com/golang/mock/gomock" ) -// MockPermissionsClient is a mock of PermissionsClient interface +// MockPermissionsClient is a mock of PermissionsClient interface. type MockPermissionsClient struct { ctrl *gomock.Controller recorder *MockPermissionsClientMockRecorder } -// MockPermissionsClientMockRecorder is the mock recorder for MockPermissionsClient +// MockPermissionsClientMockRecorder is the mock recorder for MockPermissionsClient. type MockPermissionsClientMockRecorder struct { mock *MockPermissionsClient } -// NewMockPermissionsClient creates a new mock instance +// NewMockPermissionsClient creates a new mock instance. func NewMockPermissionsClient(ctrl *gomock.Controller) *MockPermissionsClient { mock := &MockPermissionsClient{ctrl: ctrl} mock.recorder = &MockPermissionsClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockPermissionsClient) EXPECT() *MockPermissionsClientMockRecorder { return m.recorder } -// ListForResource mocks base method +// ListForResource mocks base method. func (m *MockPermissionsClient) ListForResource(arg0 context.Context, arg1, arg2, arg3, arg4, arg5 string) ([]authorization.Permission, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ListForResource", arg0, arg1, arg2, arg3, arg4, arg5) @@ -44,13 +44,13 @@ func (m *MockPermissionsClient) ListForResource(arg0 context.Context, arg1, arg2 return ret0, ret1 } -// ListForResource indicates an expected call of ListForResource +// ListForResource indicates an expected call of ListForResource. func (mr *MockPermissionsClientMockRecorder) ListForResource(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListForResource", reflect.TypeOf((*MockPermissionsClient)(nil).ListForResource), arg0, arg1, arg2, arg3, arg4, arg5) } -// ListForResourceGroup mocks base method +// ListForResourceGroup mocks base method. func (m *MockPermissionsClient) ListForResourceGroup(arg0 context.Context, arg1 string) ([]authorization.Permission, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ListForResourceGroup", arg0, arg1) @@ -59,36 +59,36 @@ func (m *MockPermissionsClient) ListForResourceGroup(arg0 context.Context, arg1 return ret0, ret1 } -// ListForResourceGroup indicates an expected call of ListForResourceGroup +// ListForResourceGroup indicates an expected call of ListForResourceGroup. 
func (mr *MockPermissionsClientMockRecorder) ListForResourceGroup(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListForResourceGroup", reflect.TypeOf((*MockPermissionsClient)(nil).ListForResourceGroup), arg0, arg1) } -// MockRoleAssignmentsClient is a mock of RoleAssignmentsClient interface +// MockRoleAssignmentsClient is a mock of RoleAssignmentsClient interface. type MockRoleAssignmentsClient struct { ctrl *gomock.Controller recorder *MockRoleAssignmentsClientMockRecorder } -// MockRoleAssignmentsClientMockRecorder is the mock recorder for MockRoleAssignmentsClient +// MockRoleAssignmentsClientMockRecorder is the mock recorder for MockRoleAssignmentsClient. type MockRoleAssignmentsClientMockRecorder struct { mock *MockRoleAssignmentsClient } -// NewMockRoleAssignmentsClient creates a new mock instance +// NewMockRoleAssignmentsClient creates a new mock instance. func NewMockRoleAssignmentsClient(ctrl *gomock.Controller) *MockRoleAssignmentsClient { mock := &MockRoleAssignmentsClient{ctrl: ctrl} mock.recorder = &MockRoleAssignmentsClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockRoleAssignmentsClient) EXPECT() *MockRoleAssignmentsClientMockRecorder { return m.recorder } -// Create mocks base method +// Create mocks base method. func (m *MockRoleAssignmentsClient) Create(arg0 context.Context, arg1, arg2 string, arg3 authorization.RoleAssignmentCreateParameters) (authorization.RoleAssignment, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Create", arg0, arg1, arg2, arg3) @@ -97,13 +97,13 @@ func (m *MockRoleAssignmentsClient) Create(arg0 context.Context, arg1, arg2 stri return ret0, ret1 } -// Create indicates an expected call of Create +// Create indicates an expected call of Create. func (mr *MockRoleAssignmentsClientMockRecorder) Create(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockRoleAssignmentsClient)(nil).Create), arg0, arg1, arg2, arg3) } -// Delete mocks base method +// Delete mocks base method. func (m *MockRoleAssignmentsClient) Delete(arg0 context.Context, arg1, arg2 string) (authorization.RoleAssignment, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Delete", arg0, arg1, arg2) @@ -112,13 +112,13 @@ func (m *MockRoleAssignmentsClient) Delete(arg0 context.Context, arg1, arg2 stri return ret0, ret1 } -// Delete indicates an expected call of Delete +// Delete indicates an expected call of Delete. func (mr *MockRoleAssignmentsClientMockRecorder) Delete(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockRoleAssignmentsClient)(nil).Delete), arg0, arg1, arg2) } -// ListForResource mocks base method +// ListForResource mocks base method. 
func (m *MockRoleAssignmentsClient) ListForResource(arg0 context.Context, arg1, arg2, arg3, arg4, arg5, arg6 string) ([]authorization.RoleAssignment, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ListForResource", arg0, arg1, arg2, arg3, arg4, arg5, arg6) @@ -127,13 +127,13 @@ func (m *MockRoleAssignmentsClient) ListForResource(arg0 context.Context, arg1, return ret0, ret1 } -// ListForResource indicates an expected call of ListForResource +// ListForResource indicates an expected call of ListForResource. func (mr *MockRoleAssignmentsClientMockRecorder) ListForResource(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListForResource", reflect.TypeOf((*MockRoleAssignmentsClient)(nil).ListForResource), arg0, arg1, arg2, arg3, arg4, arg5, arg6) } -// ListForResourceGroup mocks base method +// ListForResourceGroup mocks base method. func (m *MockRoleAssignmentsClient) ListForResourceGroup(arg0 context.Context, arg1, arg2 string) ([]authorization.RoleAssignment, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ListForResourceGroup", arg0, arg1, arg2) @@ -142,36 +142,36 @@ func (m *MockRoleAssignmentsClient) ListForResourceGroup(arg0 context.Context, a return ret0, ret1 } -// ListForResourceGroup indicates an expected call of ListForResourceGroup +// ListForResourceGroup indicates an expected call of ListForResourceGroup. func (mr *MockRoleAssignmentsClientMockRecorder) ListForResourceGroup(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListForResourceGroup", reflect.TypeOf((*MockRoleAssignmentsClient)(nil).ListForResourceGroup), arg0, arg1, arg2) } -// MockDenyAssignmentClient is a mock of DenyAssignmentClient interface +// MockDenyAssignmentClient is a mock of DenyAssignmentClient interface. type MockDenyAssignmentClient struct { ctrl *gomock.Controller recorder *MockDenyAssignmentClientMockRecorder } -// MockDenyAssignmentClientMockRecorder is the mock recorder for MockDenyAssignmentClient +// MockDenyAssignmentClientMockRecorder is the mock recorder for MockDenyAssignmentClient. type MockDenyAssignmentClientMockRecorder struct { mock *MockDenyAssignmentClient } -// NewMockDenyAssignmentClient creates a new mock instance +// NewMockDenyAssignmentClient creates a new mock instance. func NewMockDenyAssignmentClient(ctrl *gomock.Controller) *MockDenyAssignmentClient { mock := &MockDenyAssignmentClient{ctrl: ctrl} mock.recorder = &MockDenyAssignmentClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockDenyAssignmentClient) EXPECT() *MockDenyAssignmentClientMockRecorder { return m.recorder } -// ListForResourceGroup mocks base method +// ListForResourceGroup mocks base method. func (m *MockDenyAssignmentClient) ListForResourceGroup(arg0 context.Context, arg1, arg2 string) ([]authorization.DenyAssignment, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ListForResourceGroup", arg0, arg1, arg2) @@ -180,36 +180,36 @@ func (m *MockDenyAssignmentClient) ListForResourceGroup(arg0 context.Context, ar return ret0, ret1 } -// ListForResourceGroup indicates an expected call of ListForResourceGroup +// ListForResourceGroup indicates an expected call of ListForResourceGroup. 
func (mr *MockDenyAssignmentClientMockRecorder) ListForResourceGroup(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListForResourceGroup", reflect.TypeOf((*MockDenyAssignmentClient)(nil).ListForResourceGroup), arg0, arg1, arg2) } -// MockRoleDefinitionsClient is a mock of RoleDefinitionsClient interface +// MockRoleDefinitionsClient is a mock of RoleDefinitionsClient interface. type MockRoleDefinitionsClient struct { ctrl *gomock.Controller recorder *MockRoleDefinitionsClientMockRecorder } -// MockRoleDefinitionsClientMockRecorder is the mock recorder for MockRoleDefinitionsClient +// MockRoleDefinitionsClientMockRecorder is the mock recorder for MockRoleDefinitionsClient. type MockRoleDefinitionsClientMockRecorder struct { mock *MockRoleDefinitionsClient } -// NewMockRoleDefinitionsClient creates a new mock instance +// NewMockRoleDefinitionsClient creates a new mock instance. func NewMockRoleDefinitionsClient(ctrl *gomock.Controller) *MockRoleDefinitionsClient { mock := &MockRoleDefinitionsClient{ctrl: ctrl} mock.recorder = &MockRoleDefinitionsClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockRoleDefinitionsClient) EXPECT() *MockRoleDefinitionsClientMockRecorder { return m.recorder } -// Delete mocks base method +// Delete mocks base method. func (m *MockRoleDefinitionsClient) Delete(arg0 context.Context, arg1, arg2 string) (authorization.RoleDefinition, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Delete", arg0, arg1, arg2) @@ -218,13 +218,13 @@ func (m *MockRoleDefinitionsClient) Delete(arg0 context.Context, arg1, arg2 stri return ret0, ret1 } -// Delete indicates an expected call of Delete +// Delete indicates an expected call of Delete. func (mr *MockRoleDefinitionsClientMockRecorder) Delete(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockRoleDefinitionsClient)(nil).Delete), arg0, arg1, arg2) } -// List mocks base method +// List mocks base method. func (m *MockRoleDefinitionsClient) List(arg0 context.Context, arg1, arg2 string) ([]authorization.RoleDefinition, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "List", arg0, arg1, arg2) @@ -233,7 +233,7 @@ func (m *MockRoleDefinitionsClient) List(arg0 context.Context, arg1, arg2 string return ret0, ret1 } -// List indicates an expected call of List +// List indicates an expected call of List. func (mr *MockRoleDefinitionsClientMockRecorder) List(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockRoleDefinitionsClient)(nil).List), arg0, arg1, arg2) diff --git a/pkg/util/mocks/azureclient/mgmt/compute/compute.go b/pkg/util/mocks/azureclient/mgmt/compute/compute.go index 810bff791c4..93a08101456 100644 --- a/pkg/util/mocks/azureclient/mgmt/compute/compute.go +++ b/pkg/util/mocks/azureclient/mgmt/compute/compute.go @@ -12,30 +12,30 @@ import ( gomock "github.com/golang/mock/gomock" ) -// MockDisksClient is a mock of DisksClient interface +// MockDisksClient is a mock of DisksClient interface. 
type MockDisksClient struct { ctrl *gomock.Controller recorder *MockDisksClientMockRecorder } -// MockDisksClientMockRecorder is the mock recorder for MockDisksClient +// MockDisksClientMockRecorder is the mock recorder for MockDisksClient. type MockDisksClientMockRecorder struct { mock *MockDisksClient } -// NewMockDisksClient creates a new mock instance +// NewMockDisksClient creates a new mock instance. func NewMockDisksClient(ctrl *gomock.Controller) *MockDisksClient { mock := &MockDisksClient{ctrl: ctrl} mock.recorder = &MockDisksClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockDisksClient) EXPECT() *MockDisksClientMockRecorder { return m.recorder } -// DeleteAndWait mocks base method +// DeleteAndWait mocks base method. func (m *MockDisksClient) DeleteAndWait(arg0 context.Context, arg1, arg2 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeleteAndWait", arg0, arg1, arg2) @@ -43,36 +43,36 @@ func (m *MockDisksClient) DeleteAndWait(arg0 context.Context, arg1, arg2 string) return ret0 } -// DeleteAndWait indicates an expected call of DeleteAndWait +// DeleteAndWait indicates an expected call of DeleteAndWait. func (mr *MockDisksClientMockRecorder) DeleteAndWait(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAndWait", reflect.TypeOf((*MockDisksClient)(nil).DeleteAndWait), arg0, arg1, arg2) } -// MockResourceSkusClient is a mock of ResourceSkusClient interface +// MockResourceSkusClient is a mock of ResourceSkusClient interface. type MockResourceSkusClient struct { ctrl *gomock.Controller recorder *MockResourceSkusClientMockRecorder } -// MockResourceSkusClientMockRecorder is the mock recorder for MockResourceSkusClient +// MockResourceSkusClientMockRecorder is the mock recorder for MockResourceSkusClient. type MockResourceSkusClientMockRecorder struct { mock *MockResourceSkusClient } -// NewMockResourceSkusClient creates a new mock instance +// NewMockResourceSkusClient creates a new mock instance. func NewMockResourceSkusClient(ctrl *gomock.Controller) *MockResourceSkusClient { mock := &MockResourceSkusClient{ctrl: ctrl} mock.recorder = &MockResourceSkusClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockResourceSkusClient) EXPECT() *MockResourceSkusClientMockRecorder { return m.recorder } -// List mocks base method +// List mocks base method. func (m *MockResourceSkusClient) List(arg0 context.Context, arg1 string) ([]compute.ResourceSku, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "List", arg0, arg1) @@ -81,36 +81,36 @@ func (m *MockResourceSkusClient) List(arg0 context.Context, arg1 string) ([]comp return ret0, ret1 } -// List indicates an expected call of List +// List indicates an expected call of List. func (mr *MockResourceSkusClientMockRecorder) List(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockResourceSkusClient)(nil).List), arg0, arg1) } -// MockVirtualMachinesClient is a mock of VirtualMachinesClient interface +// MockVirtualMachinesClient is a mock of VirtualMachinesClient interface. 
type MockVirtualMachinesClient struct { ctrl *gomock.Controller recorder *MockVirtualMachinesClientMockRecorder } -// MockVirtualMachinesClientMockRecorder is the mock recorder for MockVirtualMachinesClient +// MockVirtualMachinesClientMockRecorder is the mock recorder for MockVirtualMachinesClient. type MockVirtualMachinesClientMockRecorder struct { mock *MockVirtualMachinesClient } -// NewMockVirtualMachinesClient creates a new mock instance +// NewMockVirtualMachinesClient creates a new mock instance. func NewMockVirtualMachinesClient(ctrl *gomock.Controller) *MockVirtualMachinesClient { mock := &MockVirtualMachinesClient{ctrl: ctrl} mock.recorder = &MockVirtualMachinesClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockVirtualMachinesClient) EXPECT() *MockVirtualMachinesClientMockRecorder { return m.recorder } -// CreateOrUpdateAndWait mocks base method +// CreateOrUpdateAndWait mocks base method. func (m *MockVirtualMachinesClient) CreateOrUpdateAndWait(arg0 context.Context, arg1, arg2 string, arg3 compute.VirtualMachine) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateOrUpdateAndWait", arg0, arg1, arg2, arg3) @@ -118,13 +118,13 @@ func (m *MockVirtualMachinesClient) CreateOrUpdateAndWait(arg0 context.Context, return ret0 } -// CreateOrUpdateAndWait indicates an expected call of CreateOrUpdateAndWait +// CreateOrUpdateAndWait indicates an expected call of CreateOrUpdateAndWait. func (mr *MockVirtualMachinesClientMockRecorder) CreateOrUpdateAndWait(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdateAndWait", reflect.TypeOf((*MockVirtualMachinesClient)(nil).CreateOrUpdateAndWait), arg0, arg1, arg2, arg3) } -// DeleteAndWait mocks base method +// DeleteAndWait mocks base method. func (m *MockVirtualMachinesClient) DeleteAndWait(arg0 context.Context, arg1, arg2 string, arg3 *bool) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeleteAndWait", arg0, arg1, arg2, arg3) @@ -132,13 +132,13 @@ func (m *MockVirtualMachinesClient) DeleteAndWait(arg0 context.Context, arg1, ar return ret0 } -// DeleteAndWait indicates an expected call of DeleteAndWait +// DeleteAndWait indicates an expected call of DeleteAndWait. func (mr *MockVirtualMachinesClientMockRecorder) DeleteAndWait(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAndWait", reflect.TypeOf((*MockVirtualMachinesClient)(nil).DeleteAndWait), arg0, arg1, arg2, arg3) } -// Get mocks base method +// Get mocks base method. func (m *MockVirtualMachinesClient) Get(arg0 context.Context, arg1, arg2 string, arg3 compute.InstanceViewTypes) (compute.VirtualMachine, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3) @@ -147,13 +147,13 @@ func (m *MockVirtualMachinesClient) Get(arg0 context.Context, arg1, arg2 string, return ret0, ret1 } -// Get indicates an expected call of Get +// Get indicates an expected call of Get. func (mr *MockVirtualMachinesClientMockRecorder) Get(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockVirtualMachinesClient)(nil).Get), arg0, arg1, arg2, arg3) } -// List mocks base method +// List mocks base method. 
func (m *MockVirtualMachinesClient) List(arg0 context.Context, arg1 string) ([]compute.VirtualMachine, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "List", arg0, arg1) @@ -162,13 +162,13 @@ func (m *MockVirtualMachinesClient) List(arg0 context.Context, arg1 string) ([]c return ret0, ret1 } -// List indicates an expected call of List +// List indicates an expected call of List. func (mr *MockVirtualMachinesClientMockRecorder) List(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockVirtualMachinesClient)(nil).List), arg0, arg1) } -// RedeployAndWait mocks base method +// RedeployAndWait mocks base method. func (m *MockVirtualMachinesClient) RedeployAndWait(arg0 context.Context, arg1, arg2 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RedeployAndWait", arg0, arg1, arg2) @@ -176,13 +176,13 @@ func (m *MockVirtualMachinesClient) RedeployAndWait(arg0 context.Context, arg1, return ret0 } -// RedeployAndWait indicates an expected call of RedeployAndWait +// RedeployAndWait indicates an expected call of RedeployAndWait. func (mr *MockVirtualMachinesClientMockRecorder) RedeployAndWait(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RedeployAndWait", reflect.TypeOf((*MockVirtualMachinesClient)(nil).RedeployAndWait), arg0, arg1, arg2) } -// StartAndWait mocks base method +// StartAndWait mocks base method. func (m *MockVirtualMachinesClient) StartAndWait(arg0 context.Context, arg1, arg2 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StartAndWait", arg0, arg1, arg2) @@ -190,36 +190,36 @@ func (m *MockVirtualMachinesClient) StartAndWait(arg0 context.Context, arg1, arg return ret0 } -// StartAndWait indicates an expected call of StartAndWait +// StartAndWait indicates an expected call of StartAndWait. func (mr *MockVirtualMachinesClientMockRecorder) StartAndWait(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartAndWait", reflect.TypeOf((*MockVirtualMachinesClient)(nil).StartAndWait), arg0, arg1, arg2) } -// MockUsageClient is a mock of UsageClient interface +// MockUsageClient is a mock of UsageClient interface. type MockUsageClient struct { ctrl *gomock.Controller recorder *MockUsageClientMockRecorder } -// MockUsageClientMockRecorder is the mock recorder for MockUsageClient +// MockUsageClientMockRecorder is the mock recorder for MockUsageClient. type MockUsageClientMockRecorder struct { mock *MockUsageClient } -// NewMockUsageClient creates a new mock instance +// NewMockUsageClient creates a new mock instance. func NewMockUsageClient(ctrl *gomock.Controller) *MockUsageClient { mock := &MockUsageClient{ctrl: ctrl} mock.recorder = &MockUsageClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockUsageClient) EXPECT() *MockUsageClientMockRecorder { return m.recorder } -// List mocks base method +// List mocks base method. func (m *MockUsageClient) List(arg0 context.Context, arg1 string) ([]compute.Usage, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "List", arg0, arg1) @@ -228,36 +228,36 @@ func (m *MockUsageClient) List(arg0 context.Context, arg1 string) ([]compute.Usa return ret0, ret1 } -// List indicates an expected call of List +// List indicates an expected call of List. 
func (mr *MockUsageClientMockRecorder) List(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockUsageClient)(nil).List), arg0, arg1) } -// MockVirtualMachineScaleSetVMsClient is a mock of VirtualMachineScaleSetVMsClient interface +// MockVirtualMachineScaleSetVMsClient is a mock of VirtualMachineScaleSetVMsClient interface. type MockVirtualMachineScaleSetVMsClient struct { ctrl *gomock.Controller recorder *MockVirtualMachineScaleSetVMsClientMockRecorder } -// MockVirtualMachineScaleSetVMsClientMockRecorder is the mock recorder for MockVirtualMachineScaleSetVMsClient +// MockVirtualMachineScaleSetVMsClientMockRecorder is the mock recorder for MockVirtualMachineScaleSetVMsClient. type MockVirtualMachineScaleSetVMsClientMockRecorder struct { mock *MockVirtualMachineScaleSetVMsClient } -// NewMockVirtualMachineScaleSetVMsClient creates a new mock instance +// NewMockVirtualMachineScaleSetVMsClient creates a new mock instance. func NewMockVirtualMachineScaleSetVMsClient(ctrl *gomock.Controller) *MockVirtualMachineScaleSetVMsClient { mock := &MockVirtualMachineScaleSetVMsClient{ctrl: ctrl} mock.recorder = &MockVirtualMachineScaleSetVMsClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockVirtualMachineScaleSetVMsClient) EXPECT() *MockVirtualMachineScaleSetVMsClientMockRecorder { return m.recorder } -// GetInstanceView mocks base method +// GetInstanceView mocks base method. func (m *MockVirtualMachineScaleSetVMsClient) GetInstanceView(arg0 context.Context, arg1, arg2, arg3 string) (compute.VirtualMachineScaleSetVMInstanceView, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetInstanceView", arg0, arg1, arg2, arg3) @@ -266,13 +266,13 @@ func (m *MockVirtualMachineScaleSetVMsClient) GetInstanceView(arg0 context.Conte return ret0, ret1 } -// GetInstanceView indicates an expected call of GetInstanceView +// GetInstanceView indicates an expected call of GetInstanceView. func (mr *MockVirtualMachineScaleSetVMsClientMockRecorder) GetInstanceView(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceView", reflect.TypeOf((*MockVirtualMachineScaleSetVMsClient)(nil).GetInstanceView), arg0, arg1, arg2, arg3) } -// List mocks base method +// List mocks base method. func (m *MockVirtualMachineScaleSetVMsClient) List(arg0 context.Context, arg1, arg2, arg3, arg4, arg5 string) ([]compute.VirtualMachineScaleSetVM, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "List", arg0, arg1, arg2, arg3, arg4, arg5) @@ -281,13 +281,13 @@ func (m *MockVirtualMachineScaleSetVMsClient) List(arg0 context.Context, arg1, a return ret0, ret1 } -// List indicates an expected call of List +// List indicates an expected call of List. func (mr *MockVirtualMachineScaleSetVMsClientMockRecorder) List(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockVirtualMachineScaleSetVMsClient)(nil).List), arg0, arg1, arg2, arg3, arg4, arg5) } -// RunCommandAndWait mocks base method +// RunCommandAndWait mocks base method. 
func (m *MockVirtualMachineScaleSetVMsClient) RunCommandAndWait(arg0 context.Context, arg1, arg2, arg3 string, arg4 compute.RunCommandInput) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RunCommandAndWait", arg0, arg1, arg2, arg3, arg4) @@ -295,36 +295,36 @@ func (m *MockVirtualMachineScaleSetVMsClient) RunCommandAndWait(arg0 context.Con return ret0 } -// RunCommandAndWait indicates an expected call of RunCommandAndWait +// RunCommandAndWait indicates an expected call of RunCommandAndWait. func (mr *MockVirtualMachineScaleSetVMsClientMockRecorder) RunCommandAndWait(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunCommandAndWait", reflect.TypeOf((*MockVirtualMachineScaleSetVMsClient)(nil).RunCommandAndWait), arg0, arg1, arg2, arg3, arg4) } -// MockVirtualMachineScaleSetsClient is a mock of VirtualMachineScaleSetsClient interface +// MockVirtualMachineScaleSetsClient is a mock of VirtualMachineScaleSetsClient interface. type MockVirtualMachineScaleSetsClient struct { ctrl *gomock.Controller recorder *MockVirtualMachineScaleSetsClientMockRecorder } -// MockVirtualMachineScaleSetsClientMockRecorder is the mock recorder for MockVirtualMachineScaleSetsClient +// MockVirtualMachineScaleSetsClientMockRecorder is the mock recorder for MockVirtualMachineScaleSetsClient. type MockVirtualMachineScaleSetsClientMockRecorder struct { mock *MockVirtualMachineScaleSetsClient } -// NewMockVirtualMachineScaleSetsClient creates a new mock instance +// NewMockVirtualMachineScaleSetsClient creates a new mock instance. func NewMockVirtualMachineScaleSetsClient(ctrl *gomock.Controller) *MockVirtualMachineScaleSetsClient { mock := &MockVirtualMachineScaleSetsClient{ctrl: ctrl} mock.recorder = &MockVirtualMachineScaleSetsClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockVirtualMachineScaleSetsClient) EXPECT() *MockVirtualMachineScaleSetsClientMockRecorder { return m.recorder } -// DeleteAndWait mocks base method +// DeleteAndWait mocks base method. func (m *MockVirtualMachineScaleSetsClient) DeleteAndWait(arg0 context.Context, arg1, arg2 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeleteAndWait", arg0, arg1, arg2) @@ -332,13 +332,13 @@ func (m *MockVirtualMachineScaleSetsClient) DeleteAndWait(arg0 context.Context, return ret0 } -// DeleteAndWait indicates an expected call of DeleteAndWait +// DeleteAndWait indicates an expected call of DeleteAndWait. func (mr *MockVirtualMachineScaleSetsClientMockRecorder) DeleteAndWait(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAndWait", reflect.TypeOf((*MockVirtualMachineScaleSetsClient)(nil).DeleteAndWait), arg0, arg1, arg2) } -// List mocks base method +// List mocks base method. func (m *MockVirtualMachineScaleSetsClient) List(arg0 context.Context, arg1 string) ([]compute.VirtualMachineScaleSet, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "List", arg0, arg1) @@ -347,7 +347,7 @@ func (m *MockVirtualMachineScaleSetsClient) List(arg0 context.Context, arg1 stri return ret0, ret1 } -// List indicates an expected call of List +// List indicates an expected call of List. 
func (mr *MockVirtualMachineScaleSetsClientMockRecorder) List(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockVirtualMachineScaleSetsClient)(nil).List), arg0, arg1) diff --git a/pkg/util/mocks/azureclient/mgmt/containerregistry/containerregistry.go b/pkg/util/mocks/azureclient/mgmt/containerregistry/containerregistry.go index 21d07d102b9..b5cc176329b 100644 --- a/pkg/util/mocks/azureclient/mgmt/containerregistry/containerregistry.go +++ b/pkg/util/mocks/azureclient/mgmt/containerregistry/containerregistry.go @@ -12,30 +12,30 @@ import ( gomock "github.com/golang/mock/gomock" ) -// MockTokensClient is a mock of TokensClient interface +// MockTokensClient is a mock of TokensClient interface. type MockTokensClient struct { ctrl *gomock.Controller recorder *MockTokensClientMockRecorder } -// MockTokensClientMockRecorder is the mock recorder for MockTokensClient +// MockTokensClientMockRecorder is the mock recorder for MockTokensClient. type MockTokensClientMockRecorder struct { mock *MockTokensClient } -// NewMockTokensClient creates a new mock instance +// NewMockTokensClient creates a new mock instance. func NewMockTokensClient(ctrl *gomock.Controller) *MockTokensClient { mock := &MockTokensClient{ctrl: ctrl} mock.recorder = &MockTokensClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockTokensClient) EXPECT() *MockTokensClientMockRecorder { return m.recorder } -// CreateAndWait mocks base method +// CreateAndWait mocks base method. func (m *MockTokensClient) CreateAndWait(arg0 context.Context, arg1, arg2, arg3 string, arg4 containerregistry.Token) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateAndWait", arg0, arg1, arg2, arg3, arg4) @@ -43,13 +43,13 @@ func (m *MockTokensClient) CreateAndWait(arg0 context.Context, arg1, arg2, arg3 return ret0 } -// CreateAndWait indicates an expected call of CreateAndWait +// CreateAndWait indicates an expected call of CreateAndWait. func (mr *MockTokensClientMockRecorder) CreateAndWait(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAndWait", reflect.TypeOf((*MockTokensClient)(nil).CreateAndWait), arg0, arg1, arg2, arg3, arg4) } -// DeleteAndWait mocks base method +// DeleteAndWait mocks base method. func (m *MockTokensClient) DeleteAndWait(arg0 context.Context, arg1, arg2, arg3 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeleteAndWait", arg0, arg1, arg2, arg3) @@ -57,36 +57,36 @@ func (m *MockTokensClient) DeleteAndWait(arg0 context.Context, arg1, arg2, arg3 return ret0 } -// DeleteAndWait indicates an expected call of DeleteAndWait +// DeleteAndWait indicates an expected call of DeleteAndWait. func (mr *MockTokensClientMockRecorder) DeleteAndWait(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAndWait", reflect.TypeOf((*MockTokensClient)(nil).DeleteAndWait), arg0, arg1, arg2, arg3) } -// MockRegistriesClient is a mock of RegistriesClient interface +// MockRegistriesClient is a mock of RegistriesClient interface. 
type MockRegistriesClient struct { ctrl *gomock.Controller recorder *MockRegistriesClientMockRecorder } -// MockRegistriesClientMockRecorder is the mock recorder for MockRegistriesClient +// MockRegistriesClientMockRecorder is the mock recorder for MockRegistriesClient. type MockRegistriesClientMockRecorder struct { mock *MockRegistriesClient } -// NewMockRegistriesClient creates a new mock instance +// NewMockRegistriesClient creates a new mock instance. func NewMockRegistriesClient(ctrl *gomock.Controller) *MockRegistriesClient { mock := &MockRegistriesClient{ctrl: ctrl} mock.recorder = &MockRegistriesClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockRegistriesClient) EXPECT() *MockRegistriesClientMockRecorder { return m.recorder } -// GenerateCredentials mocks base method +// GenerateCredentials mocks base method. func (m *MockRegistriesClient) GenerateCredentials(arg0 context.Context, arg1, arg2 string, arg3 containerregistry.GenerateCredentialsParameters) (containerregistry.GenerateCredentialsResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GenerateCredentials", arg0, arg1, arg2, arg3) @@ -95,7 +95,7 @@ func (m *MockRegistriesClient) GenerateCredentials(arg0 context.Context, arg1, a return ret0, ret1 } -// GenerateCredentials indicates an expected call of GenerateCredentials +// GenerateCredentials indicates an expected call of GenerateCredentials. func (mr *MockRegistriesClientMockRecorder) GenerateCredentials(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateCredentials", reflect.TypeOf((*MockRegistriesClient)(nil).GenerateCredentials), arg0, arg1, arg2, arg3) diff --git a/pkg/util/mocks/azureclient/mgmt/dns/dns.go b/pkg/util/mocks/azureclient/mgmt/dns/dns.go index 0cccfb22708..a46dc959a20 100644 --- a/pkg/util/mocks/azureclient/mgmt/dns/dns.go +++ b/pkg/util/mocks/azureclient/mgmt/dns/dns.go @@ -13,30 +13,30 @@ import ( gomock "github.com/golang/mock/gomock" ) -// MockRecordSetsClient is a mock of RecordSetsClient interface +// MockRecordSetsClient is a mock of RecordSetsClient interface. type MockRecordSetsClient struct { ctrl *gomock.Controller recorder *MockRecordSetsClientMockRecorder } -// MockRecordSetsClientMockRecorder is the mock recorder for MockRecordSetsClient +// MockRecordSetsClientMockRecorder is the mock recorder for MockRecordSetsClient. type MockRecordSetsClientMockRecorder struct { mock *MockRecordSetsClient } -// NewMockRecordSetsClient creates a new mock instance +// NewMockRecordSetsClient creates a new mock instance. func NewMockRecordSetsClient(ctrl *gomock.Controller) *MockRecordSetsClient { mock := &MockRecordSetsClient{ctrl: ctrl} mock.recorder = &MockRecordSetsClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockRecordSetsClient) EXPECT() *MockRecordSetsClientMockRecorder { return m.recorder } -// CreateOrUpdate mocks base method +// CreateOrUpdate mocks base method. 
func (m *MockRecordSetsClient) CreateOrUpdate(arg0 context.Context, arg1, arg2, arg3 string, arg4 dns.RecordType, arg5 dns.RecordSet, arg6, arg7 string) (dns.RecordSet, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateOrUpdate", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) @@ -45,13 +45,13 @@ func (m *MockRecordSetsClient) CreateOrUpdate(arg0 context.Context, arg1, arg2, return ret0, ret1 } -// CreateOrUpdate indicates an expected call of CreateOrUpdate +// CreateOrUpdate indicates an expected call of CreateOrUpdate. func (mr *MockRecordSetsClientMockRecorder) CreateOrUpdate(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockRecordSetsClient)(nil).CreateOrUpdate), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) } -// Delete mocks base method +// Delete mocks base method. func (m *MockRecordSetsClient) Delete(arg0 context.Context, arg1, arg2, arg3 string, arg4 dns.RecordType, arg5 string) (autorest.Response, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Delete", arg0, arg1, arg2, arg3, arg4, arg5) @@ -60,13 +60,13 @@ func (m *MockRecordSetsClient) Delete(arg0 context.Context, arg1, arg2, arg3 str return ret0, ret1 } -// Delete indicates an expected call of Delete +// Delete indicates an expected call of Delete. func (mr *MockRecordSetsClientMockRecorder) Delete(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockRecordSetsClient)(nil).Delete), arg0, arg1, arg2, arg3, arg4, arg5) } -// Get mocks base method +// Get mocks base method. func (m *MockRecordSetsClient) Get(arg0 context.Context, arg1, arg2, arg3 string, arg4 dns.RecordType) (dns.RecordSet, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3, arg4) @@ -75,36 +75,36 @@ func (m *MockRecordSetsClient) Get(arg0 context.Context, arg1, arg2, arg3 string return ret0, ret1 } -// Get indicates an expected call of Get +// Get indicates an expected call of Get. func (mr *MockRecordSetsClientMockRecorder) Get(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockRecordSetsClient)(nil).Get), arg0, arg1, arg2, arg3, arg4) } -// MockZonesClient is a mock of ZonesClient interface +// MockZonesClient is a mock of ZonesClient interface. type MockZonesClient struct { ctrl *gomock.Controller recorder *MockZonesClientMockRecorder } -// MockZonesClientMockRecorder is the mock recorder for MockZonesClient +// MockZonesClientMockRecorder is the mock recorder for MockZonesClient. type MockZonesClientMockRecorder struct { mock *MockZonesClient } -// NewMockZonesClient creates a new mock instance +// NewMockZonesClient creates a new mock instance. func NewMockZonesClient(ctrl *gomock.Controller) *MockZonesClient { mock := &MockZonesClient{ctrl: ctrl} mock.recorder = &MockZonesClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockZonesClient) EXPECT() *MockZonesClientMockRecorder { return m.recorder } -// Get mocks base method +// Get mocks base method. 
func (m *MockZonesClient) Get(arg0 context.Context, arg1, arg2 string) (dns.Zone, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2) @@ -113,7 +113,7 @@ func (m *MockZonesClient) Get(arg0 context.Context, arg1, arg2 string) (dns.Zone return ret0, ret1 } -// Get indicates an expected call of Get +// Get indicates an expected call of Get. func (mr *MockZonesClientMockRecorder) Get(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockZonesClient)(nil).Get), arg0, arg1, arg2) diff --git a/pkg/util/mocks/azureclient/mgmt/features/features.go b/pkg/util/mocks/azureclient/mgmt/features/features.go index 2b2d91997f4..21717e7584e 100644 --- a/pkg/util/mocks/azureclient/mgmt/features/features.go +++ b/pkg/util/mocks/azureclient/mgmt/features/features.go @@ -14,30 +14,30 @@ import ( gomock "github.com/golang/mock/gomock" ) -// MockClient is a mock of Client interface +// MockClient is a mock of Client interface. type MockClient struct { ctrl *gomock.Controller recorder *MockClientMockRecorder } -// MockClientMockRecorder is the mock recorder for MockClient +// MockClientMockRecorder is the mock recorder for MockClient. type MockClientMockRecorder struct { mock *MockClient } -// NewMockClient creates a new mock instance +// NewMockClient creates a new mock instance. func NewMockClient(ctrl *gomock.Controller) *MockClient { mock := &MockClient{ctrl: ctrl} mock.recorder = &MockClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockClient) EXPECT() *MockClientMockRecorder { return m.recorder } -// Get mocks base method +// Get mocks base method. func (m *MockClient) Get(arg0 context.Context, arg1, arg2 string) (features.Result, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2) @@ -46,13 +46,13 @@ func (m *MockClient) Get(arg0 context.Context, arg1, arg2 string) (features.Resu return ret0, ret1 } -// Get indicates an expected call of Get +// Get indicates an expected call of Get. func (mr *MockClientMockRecorder) Get(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockClient)(nil).Get), arg0, arg1, arg2) } -// Register mocks base method +// Register mocks base method. func (m *MockClient) Register(arg0 context.Context, arg1, arg2 string) (features.Result, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Register", arg0, arg1, arg2) @@ -61,36 +61,36 @@ func (m *MockClient) Register(arg0 context.Context, arg1, arg2 string) (features return ret0, ret1 } -// Register indicates an expected call of Register +// Register indicates an expected call of Register. func (mr *MockClientMockRecorder) Register(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Register", reflect.TypeOf((*MockClient)(nil).Register), arg0, arg1, arg2) } -// MockDeploymentsClient is a mock of DeploymentsClient interface +// MockDeploymentsClient is a mock of DeploymentsClient interface. type MockDeploymentsClient struct { ctrl *gomock.Controller recorder *MockDeploymentsClientMockRecorder } -// MockDeploymentsClientMockRecorder is the mock recorder for MockDeploymentsClient +// MockDeploymentsClientMockRecorder is the mock recorder for MockDeploymentsClient. 
type MockDeploymentsClientMockRecorder struct { mock *MockDeploymentsClient } -// NewMockDeploymentsClient creates a new mock instance +// NewMockDeploymentsClient creates a new mock instance. func NewMockDeploymentsClient(ctrl *gomock.Controller) *MockDeploymentsClient { mock := &MockDeploymentsClient{ctrl: ctrl} mock.recorder = &MockDeploymentsClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockDeploymentsClient) EXPECT() *MockDeploymentsClientMockRecorder { return m.recorder } -// CreateOrUpdateAndWait mocks base method +// CreateOrUpdateAndWait mocks base method. func (m *MockDeploymentsClient) CreateOrUpdateAndWait(arg0 context.Context, arg1, arg2 string, arg3 features0.Deployment) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateOrUpdateAndWait", arg0, arg1, arg2, arg3) @@ -98,13 +98,13 @@ func (m *MockDeploymentsClient) CreateOrUpdateAndWait(arg0 context.Context, arg1 return ret0 } -// CreateOrUpdateAndWait indicates an expected call of CreateOrUpdateAndWait +// CreateOrUpdateAndWait indicates an expected call of CreateOrUpdateAndWait. func (mr *MockDeploymentsClientMockRecorder) CreateOrUpdateAndWait(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdateAndWait", reflect.TypeOf((*MockDeploymentsClient)(nil).CreateOrUpdateAndWait), arg0, arg1, arg2, arg3) } -// CreateOrUpdateAtSubscriptionScopeAndWait mocks base method +// CreateOrUpdateAtSubscriptionScopeAndWait mocks base method. func (m *MockDeploymentsClient) CreateOrUpdateAtSubscriptionScopeAndWait(arg0 context.Context, arg1 string, arg2 features0.Deployment) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateOrUpdateAtSubscriptionScopeAndWait", arg0, arg1, arg2) @@ -112,13 +112,13 @@ func (m *MockDeploymentsClient) CreateOrUpdateAtSubscriptionScopeAndWait(arg0 co return ret0 } -// CreateOrUpdateAtSubscriptionScopeAndWait indicates an expected call of CreateOrUpdateAtSubscriptionScopeAndWait +// CreateOrUpdateAtSubscriptionScopeAndWait indicates an expected call of CreateOrUpdateAtSubscriptionScopeAndWait. func (mr *MockDeploymentsClientMockRecorder) CreateOrUpdateAtSubscriptionScopeAndWait(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdateAtSubscriptionScopeAndWait", reflect.TypeOf((*MockDeploymentsClient)(nil).CreateOrUpdateAtSubscriptionScopeAndWait), arg0, arg1, arg2) } -// DeleteAndWait mocks base method +// DeleteAndWait mocks base method. func (m *MockDeploymentsClient) DeleteAndWait(arg0 context.Context, arg1, arg2 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeleteAndWait", arg0, arg1, arg2) @@ -126,13 +126,13 @@ func (m *MockDeploymentsClient) DeleteAndWait(arg0 context.Context, arg1, arg2 s return ret0 } -// DeleteAndWait indicates an expected call of DeleteAndWait +// DeleteAndWait indicates an expected call of DeleteAndWait. func (mr *MockDeploymentsClientMockRecorder) DeleteAndWait(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAndWait", reflect.TypeOf((*MockDeploymentsClient)(nil).DeleteAndWait), arg0, arg1, arg2) } -// Get mocks base method +// Get mocks base method. 
func (m *MockDeploymentsClient) Get(arg0 context.Context, arg1, arg2 string) (features0.DeploymentExtended, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2) @@ -141,13 +141,13 @@ func (m *MockDeploymentsClient) Get(arg0 context.Context, arg1, arg2 string) (fe return ret0, ret1 } -// Get indicates an expected call of Get +// Get indicates an expected call of Get. func (mr *MockDeploymentsClientMockRecorder) Get(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockDeploymentsClient)(nil).Get), arg0, arg1, arg2) } -// Wait mocks base method +// Wait mocks base method. func (m *MockDeploymentsClient) Wait(arg0 context.Context, arg1, arg2 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Wait", arg0, arg1, arg2) @@ -155,36 +155,36 @@ func (m *MockDeploymentsClient) Wait(arg0 context.Context, arg1, arg2 string) er return ret0 } -// Wait indicates an expected call of Wait +// Wait indicates an expected call of Wait. func (mr *MockDeploymentsClientMockRecorder) Wait(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Wait", reflect.TypeOf((*MockDeploymentsClient)(nil).Wait), arg0, arg1, arg2) } -// MockProvidersClient is a mock of ProvidersClient interface +// MockProvidersClient is a mock of ProvidersClient interface. type MockProvidersClient struct { ctrl *gomock.Controller recorder *MockProvidersClientMockRecorder } -// MockProvidersClientMockRecorder is the mock recorder for MockProvidersClient +// MockProvidersClientMockRecorder is the mock recorder for MockProvidersClient. type MockProvidersClientMockRecorder struct { mock *MockProvidersClient } -// NewMockProvidersClient creates a new mock instance +// NewMockProvidersClient creates a new mock instance. func NewMockProvidersClient(ctrl *gomock.Controller) *MockProvidersClient { mock := &MockProvidersClient{ctrl: ctrl} mock.recorder = &MockProvidersClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockProvidersClient) EXPECT() *MockProvidersClientMockRecorder { return m.recorder } -// List mocks base method +// List mocks base method. func (m *MockProvidersClient) List(arg0 context.Context, arg1 *int32, arg2 string) ([]features0.Provider, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "List", arg0, arg1, arg2) @@ -193,13 +193,13 @@ func (m *MockProvidersClient) List(arg0 context.Context, arg1 *int32, arg2 strin return ret0, ret1 } -// List indicates an expected call of List +// List indicates an expected call of List. func (mr *MockProvidersClientMockRecorder) List(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockProvidersClient)(nil).List), arg0, arg1, arg2) } -// Register mocks base method +// Register mocks base method. func (m *MockProvidersClient) Register(arg0 context.Context, arg1 string) (features0.Provider, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Register", arg0, arg1) @@ -208,36 +208,36 @@ func (m *MockProvidersClient) Register(arg0 context.Context, arg1 string) (featu return ret0, ret1 } -// Register indicates an expected call of Register +// Register indicates an expected call of Register. 
func (mr *MockProvidersClientMockRecorder) Register(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Register", reflect.TypeOf((*MockProvidersClient)(nil).Register), arg0, arg1) } -// MockResourceGroupsClient is a mock of ResourceGroupsClient interface +// MockResourceGroupsClient is a mock of ResourceGroupsClient interface. type MockResourceGroupsClient struct { ctrl *gomock.Controller recorder *MockResourceGroupsClientMockRecorder } -// MockResourceGroupsClientMockRecorder is the mock recorder for MockResourceGroupsClient +// MockResourceGroupsClientMockRecorder is the mock recorder for MockResourceGroupsClient. type MockResourceGroupsClientMockRecorder struct { mock *MockResourceGroupsClient } -// NewMockResourceGroupsClient creates a new mock instance +// NewMockResourceGroupsClient creates a new mock instance. func NewMockResourceGroupsClient(ctrl *gomock.Controller) *MockResourceGroupsClient { mock := &MockResourceGroupsClient{ctrl: ctrl} mock.recorder = &MockResourceGroupsClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockResourceGroupsClient) EXPECT() *MockResourceGroupsClientMockRecorder { return m.recorder } -// CreateOrUpdate mocks base method +// CreateOrUpdate mocks base method. func (m *MockResourceGroupsClient) CreateOrUpdate(arg0 context.Context, arg1 string, arg2 features0.ResourceGroup) (features0.ResourceGroup, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateOrUpdate", arg0, arg1, arg2) @@ -246,13 +246,13 @@ func (m *MockResourceGroupsClient) CreateOrUpdate(arg0 context.Context, arg1 str return ret0, ret1 } -// CreateOrUpdate indicates an expected call of CreateOrUpdate +// CreateOrUpdate indicates an expected call of CreateOrUpdate. func (mr *MockResourceGroupsClientMockRecorder) CreateOrUpdate(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockResourceGroupsClient)(nil).CreateOrUpdate), arg0, arg1, arg2) } -// Delete mocks base method +// Delete mocks base method. func (m *MockResourceGroupsClient) Delete(arg0 context.Context, arg1 string) (features0.ResourceGroupsDeleteFuture, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Delete", arg0, arg1) @@ -261,13 +261,13 @@ func (m *MockResourceGroupsClient) Delete(arg0 context.Context, arg1 string) (fe return ret0, ret1 } -// Delete indicates an expected call of Delete +// Delete indicates an expected call of Delete. func (mr *MockResourceGroupsClientMockRecorder) Delete(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockResourceGroupsClient)(nil).Delete), arg0, arg1) } -// DeleteAndWait mocks base method +// DeleteAndWait mocks base method. func (m *MockResourceGroupsClient) DeleteAndWait(arg0 context.Context, arg1 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeleteAndWait", arg0, arg1) @@ -275,13 +275,13 @@ func (m *MockResourceGroupsClient) DeleteAndWait(arg0 context.Context, arg1 stri return ret0 } -// DeleteAndWait indicates an expected call of DeleteAndWait +// DeleteAndWait indicates an expected call of DeleteAndWait. 
func (mr *MockResourceGroupsClientMockRecorder) DeleteAndWait(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAndWait", reflect.TypeOf((*MockResourceGroupsClient)(nil).DeleteAndWait), arg0, arg1) } -// Get mocks base method +// Get mocks base method. func (m *MockResourceGroupsClient) Get(arg0 context.Context, arg1 string) (features0.ResourceGroup, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", arg0, arg1) @@ -290,13 +290,13 @@ func (m *MockResourceGroupsClient) Get(arg0 context.Context, arg1 string) (featu return ret0, ret1 } -// Get indicates an expected call of Get +// Get indicates an expected call of Get. func (mr *MockResourceGroupsClientMockRecorder) Get(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockResourceGroupsClient)(nil).Get), arg0, arg1) } -// List mocks base method +// List mocks base method. func (m *MockResourceGroupsClient) List(arg0 context.Context, arg1 string, arg2 *int32) ([]features0.ResourceGroup, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "List", arg0, arg1, arg2) @@ -305,36 +305,36 @@ func (m *MockResourceGroupsClient) List(arg0 context.Context, arg1 string, arg2 return ret0, ret1 } -// List indicates an expected call of List +// List indicates an expected call of List. func (mr *MockResourceGroupsClientMockRecorder) List(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockResourceGroupsClient)(nil).List), arg0, arg1, arg2) } -// MockResourcesClient is a mock of ResourcesClient interface +// MockResourcesClient is a mock of ResourcesClient interface. type MockResourcesClient struct { ctrl *gomock.Controller recorder *MockResourcesClientMockRecorder } -// MockResourcesClientMockRecorder is the mock recorder for MockResourcesClient +// MockResourcesClientMockRecorder is the mock recorder for MockResourcesClient. type MockResourcesClientMockRecorder struct { mock *MockResourcesClient } -// NewMockResourcesClient creates a new mock instance +// NewMockResourcesClient creates a new mock instance. func NewMockResourcesClient(ctrl *gomock.Controller) *MockResourcesClient { mock := &MockResourcesClient{ctrl: ctrl} mock.recorder = &MockResourcesClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockResourcesClient) EXPECT() *MockResourcesClientMockRecorder { return m.recorder } -// Client mocks base method +// Client mocks base method. func (m *MockResourcesClient) Client() autorest.Client { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Client") @@ -342,13 +342,13 @@ func (m *MockResourcesClient) Client() autorest.Client { return ret0 } -// Client indicates an expected call of Client +// Client indicates an expected call of Client. func (mr *MockResourcesClientMockRecorder) Client() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Client", reflect.TypeOf((*MockResourcesClient)(nil).Client)) } -// DeleteByID mocks base method +// DeleteByID mocks base method. 
func (m *MockResourcesClient) DeleteByID(arg0 context.Context, arg1, arg2 string) (features0.ResourcesDeleteByIDFuture, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeleteByID", arg0, arg1, arg2) @@ -357,13 +357,13 @@ func (m *MockResourcesClient) DeleteByID(arg0 context.Context, arg1, arg2 string return ret0, ret1 } -// DeleteByID indicates an expected call of DeleteByID +// DeleteByID indicates an expected call of DeleteByID. func (mr *MockResourcesClientMockRecorder) DeleteByID(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteByID", reflect.TypeOf((*MockResourcesClient)(nil).DeleteByID), arg0, arg1, arg2) } -// GetByID mocks base method +// GetByID mocks base method. func (m *MockResourcesClient) GetByID(arg0 context.Context, arg1, arg2 string) (features0.GenericResource, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetByID", arg0, arg1, arg2) @@ -372,13 +372,13 @@ func (m *MockResourcesClient) GetByID(arg0 context.Context, arg1, arg2 string) ( return ret0, ret1 } -// GetByID indicates an expected call of GetByID +// GetByID indicates an expected call of GetByID. func (mr *MockResourcesClientMockRecorder) GetByID(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetByID", reflect.TypeOf((*MockResourcesClient)(nil).GetByID), arg0, arg1, arg2) } -// ListByResourceGroup mocks base method +// ListByResourceGroup mocks base method. func (m *MockResourcesClient) ListByResourceGroup(arg0 context.Context, arg1, arg2, arg3 string, arg4 *int32) ([]features0.GenericResourceExpanded, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ListByResourceGroup", arg0, arg1, arg2, arg3, arg4) @@ -387,7 +387,7 @@ func (m *MockResourcesClient) ListByResourceGroup(arg0 context.Context, arg1, ar return ret0, ret1 } -// ListByResourceGroup indicates an expected call of ListByResourceGroup +// ListByResourceGroup indicates an expected call of ListByResourceGroup. func (mr *MockResourcesClientMockRecorder) ListByResourceGroup(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByResourceGroup", reflect.TypeOf((*MockResourcesClient)(nil).ListByResourceGroup), arg0, arg1, arg2, arg3, arg4) diff --git a/pkg/util/mocks/azureclient/mgmt/network/network.go b/pkg/util/mocks/azureclient/mgmt/network/network.go index 6f73dd6682a..371afedc059 100644 --- a/pkg/util/mocks/azureclient/mgmt/network/network.go +++ b/pkg/util/mocks/azureclient/mgmt/network/network.go @@ -12,30 +12,30 @@ import ( gomock "github.com/golang/mock/gomock" ) -// MockInterfacesClient is a mock of InterfacesClient interface +// MockInterfacesClient is a mock of InterfacesClient interface. type MockInterfacesClient struct { ctrl *gomock.Controller recorder *MockInterfacesClientMockRecorder } -// MockInterfacesClientMockRecorder is the mock recorder for MockInterfacesClient +// MockInterfacesClientMockRecorder is the mock recorder for MockInterfacesClient. type MockInterfacesClientMockRecorder struct { mock *MockInterfacesClient } -// NewMockInterfacesClient creates a new mock instance +// NewMockInterfacesClient creates a new mock instance. 
func NewMockInterfacesClient(ctrl *gomock.Controller) *MockInterfacesClient { mock := &MockInterfacesClient{ctrl: ctrl} mock.recorder = &MockInterfacesClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockInterfacesClient) EXPECT() *MockInterfacesClientMockRecorder { return m.recorder } -// CreateOrUpdateAndWait mocks base method +// CreateOrUpdateAndWait mocks base method. func (m *MockInterfacesClient) CreateOrUpdateAndWait(arg0 context.Context, arg1, arg2 string, arg3 network.Interface) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateOrUpdateAndWait", arg0, arg1, arg2, arg3) @@ -43,13 +43,13 @@ func (m *MockInterfacesClient) CreateOrUpdateAndWait(arg0 context.Context, arg1, return ret0 } -// CreateOrUpdateAndWait indicates an expected call of CreateOrUpdateAndWait +// CreateOrUpdateAndWait indicates an expected call of CreateOrUpdateAndWait. func (mr *MockInterfacesClientMockRecorder) CreateOrUpdateAndWait(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdateAndWait", reflect.TypeOf((*MockInterfacesClient)(nil).CreateOrUpdateAndWait), arg0, arg1, arg2, arg3) } -// DeleteAndWait mocks base method +// DeleteAndWait mocks base method. func (m *MockInterfacesClient) DeleteAndWait(arg0 context.Context, arg1, arg2 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeleteAndWait", arg0, arg1, arg2) @@ -57,13 +57,13 @@ func (m *MockInterfacesClient) DeleteAndWait(arg0 context.Context, arg1, arg2 st return ret0 } -// DeleteAndWait indicates an expected call of DeleteAndWait +// DeleteAndWait indicates an expected call of DeleteAndWait. func (mr *MockInterfacesClientMockRecorder) DeleteAndWait(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAndWait", reflect.TypeOf((*MockInterfacesClient)(nil).DeleteAndWait), arg0, arg1, arg2) } -// Get mocks base method +// Get mocks base method. func (m *MockInterfacesClient) Get(arg0 context.Context, arg1, arg2, arg3 string) (network.Interface, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3) @@ -72,36 +72,36 @@ func (m *MockInterfacesClient) Get(arg0 context.Context, arg1, arg2, arg3 string return ret0, ret1 } -// Get indicates an expected call of Get +// Get indicates an expected call of Get. func (mr *MockInterfacesClientMockRecorder) Get(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterfacesClient)(nil).Get), arg0, arg1, arg2, arg3) } -// MockLoadBalancersClient is a mock of LoadBalancersClient interface +// MockLoadBalancersClient is a mock of LoadBalancersClient interface. type MockLoadBalancersClient struct { ctrl *gomock.Controller recorder *MockLoadBalancersClientMockRecorder } -// MockLoadBalancersClientMockRecorder is the mock recorder for MockLoadBalancersClient +// MockLoadBalancersClientMockRecorder is the mock recorder for MockLoadBalancersClient. type MockLoadBalancersClientMockRecorder struct { mock *MockLoadBalancersClient } -// NewMockLoadBalancersClient creates a new mock instance +// NewMockLoadBalancersClient creates a new mock instance. 
func NewMockLoadBalancersClient(ctrl *gomock.Controller) *MockLoadBalancersClient { mock := &MockLoadBalancersClient{ctrl: ctrl} mock.recorder = &MockLoadBalancersClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockLoadBalancersClient) EXPECT() *MockLoadBalancersClientMockRecorder { return m.recorder } -// CreateOrUpdateAndWait mocks base method +// CreateOrUpdateAndWait mocks base method. func (m *MockLoadBalancersClient) CreateOrUpdateAndWait(arg0 context.Context, arg1, arg2 string, arg3 network.LoadBalancer) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateOrUpdateAndWait", arg0, arg1, arg2, arg3) @@ -109,13 +109,13 @@ func (m *MockLoadBalancersClient) CreateOrUpdateAndWait(arg0 context.Context, ar return ret0 } -// CreateOrUpdateAndWait indicates an expected call of CreateOrUpdateAndWait +// CreateOrUpdateAndWait indicates an expected call of CreateOrUpdateAndWait. func (mr *MockLoadBalancersClientMockRecorder) CreateOrUpdateAndWait(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdateAndWait", reflect.TypeOf((*MockLoadBalancersClient)(nil).CreateOrUpdateAndWait), arg0, arg1, arg2, arg3) } -// Get mocks base method +// Get mocks base method. func (m *MockLoadBalancersClient) Get(arg0 context.Context, arg1, arg2, arg3 string) (network.LoadBalancer, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3) @@ -124,36 +124,36 @@ func (m *MockLoadBalancersClient) Get(arg0 context.Context, arg1, arg2, arg3 str return ret0, ret1 } -// Get indicates an expected call of Get +// Get indicates an expected call of Get. func (mr *MockLoadBalancersClientMockRecorder) Get(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockLoadBalancersClient)(nil).Get), arg0, arg1, arg2, arg3) } -// MockPrivateEndpointsClient is a mock of PrivateEndpointsClient interface +// MockPrivateEndpointsClient is a mock of PrivateEndpointsClient interface. type MockPrivateEndpointsClient struct { ctrl *gomock.Controller recorder *MockPrivateEndpointsClientMockRecorder } -// MockPrivateEndpointsClientMockRecorder is the mock recorder for MockPrivateEndpointsClient +// MockPrivateEndpointsClientMockRecorder is the mock recorder for MockPrivateEndpointsClient. type MockPrivateEndpointsClientMockRecorder struct { mock *MockPrivateEndpointsClient } -// NewMockPrivateEndpointsClient creates a new mock instance +// NewMockPrivateEndpointsClient creates a new mock instance. func NewMockPrivateEndpointsClient(ctrl *gomock.Controller) *MockPrivateEndpointsClient { mock := &MockPrivateEndpointsClient{ctrl: ctrl} mock.recorder = &MockPrivateEndpointsClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockPrivateEndpointsClient) EXPECT() *MockPrivateEndpointsClientMockRecorder { return m.recorder } -// CreateOrUpdateAndWait mocks base method +// CreateOrUpdateAndWait mocks base method. 
func (m *MockPrivateEndpointsClient) CreateOrUpdateAndWait(arg0 context.Context, arg1, arg2 string, arg3 network.PrivateEndpoint) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateOrUpdateAndWait", arg0, arg1, arg2, arg3) @@ -161,13 +161,13 @@ func (m *MockPrivateEndpointsClient) CreateOrUpdateAndWait(arg0 context.Context, return ret0 } -// CreateOrUpdateAndWait indicates an expected call of CreateOrUpdateAndWait +// CreateOrUpdateAndWait indicates an expected call of CreateOrUpdateAndWait. func (mr *MockPrivateEndpointsClientMockRecorder) CreateOrUpdateAndWait(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdateAndWait", reflect.TypeOf((*MockPrivateEndpointsClient)(nil).CreateOrUpdateAndWait), arg0, arg1, arg2, arg3) } -// DeleteAndWait mocks base method +// DeleteAndWait mocks base method. func (m *MockPrivateEndpointsClient) DeleteAndWait(arg0 context.Context, arg1, arg2 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeleteAndWait", arg0, arg1, arg2) @@ -175,13 +175,13 @@ func (m *MockPrivateEndpointsClient) DeleteAndWait(arg0 context.Context, arg1, a return ret0 } -// DeleteAndWait indicates an expected call of DeleteAndWait +// DeleteAndWait indicates an expected call of DeleteAndWait. func (mr *MockPrivateEndpointsClientMockRecorder) DeleteAndWait(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAndWait", reflect.TypeOf((*MockPrivateEndpointsClient)(nil).DeleteAndWait), arg0, arg1, arg2) } -// Get mocks base method +// Get mocks base method. func (m *MockPrivateEndpointsClient) Get(arg0 context.Context, arg1, arg2, arg3 string) (network.PrivateEndpoint, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3) @@ -190,36 +190,36 @@ func (m *MockPrivateEndpointsClient) Get(arg0 context.Context, arg1, arg2, arg3 return ret0, ret1 } -// Get indicates an expected call of Get +// Get indicates an expected call of Get. func (mr *MockPrivateEndpointsClientMockRecorder) Get(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockPrivateEndpointsClient)(nil).Get), arg0, arg1, arg2, arg3) } -// MockPrivateLinkServicesClient is a mock of PrivateLinkServicesClient interface +// MockPrivateLinkServicesClient is a mock of PrivateLinkServicesClient interface. type MockPrivateLinkServicesClient struct { ctrl *gomock.Controller recorder *MockPrivateLinkServicesClientMockRecorder } -// MockPrivateLinkServicesClientMockRecorder is the mock recorder for MockPrivateLinkServicesClient +// MockPrivateLinkServicesClientMockRecorder is the mock recorder for MockPrivateLinkServicesClient. type MockPrivateLinkServicesClientMockRecorder struct { mock *MockPrivateLinkServicesClient } -// NewMockPrivateLinkServicesClient creates a new mock instance +// NewMockPrivateLinkServicesClient creates a new mock instance. func NewMockPrivateLinkServicesClient(ctrl *gomock.Controller) *MockPrivateLinkServicesClient { mock := &MockPrivateLinkServicesClient{ctrl: ctrl} mock.recorder = &MockPrivateLinkServicesClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. 
func (m *MockPrivateLinkServicesClient) EXPECT() *MockPrivateLinkServicesClientMockRecorder { return m.recorder } -// DeletePrivateEndpointConnection mocks base method +// DeletePrivateEndpointConnection mocks base method. func (m *MockPrivateLinkServicesClient) DeletePrivateEndpointConnection(arg0 context.Context, arg1, arg2, arg3 string) (network.PrivateLinkServicesDeletePrivateEndpointConnectionFuture, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeletePrivateEndpointConnection", arg0, arg1, arg2, arg3) @@ -228,13 +228,13 @@ func (m *MockPrivateLinkServicesClient) DeletePrivateEndpointConnection(arg0 con return ret0, ret1 } -// DeletePrivateEndpointConnection indicates an expected call of DeletePrivateEndpointConnection +// DeletePrivateEndpointConnection indicates an expected call of DeletePrivateEndpointConnection. func (mr *MockPrivateLinkServicesClientMockRecorder) DeletePrivateEndpointConnection(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePrivateEndpointConnection", reflect.TypeOf((*MockPrivateLinkServicesClient)(nil).DeletePrivateEndpointConnection), arg0, arg1, arg2, arg3) } -// List mocks base method +// List mocks base method. func (m *MockPrivateLinkServicesClient) List(arg0 context.Context, arg1 string) ([]network.PrivateLinkService, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "List", arg0, arg1) @@ -243,36 +243,36 @@ func (m *MockPrivateLinkServicesClient) List(arg0 context.Context, arg1 string) return ret0, ret1 } -// List indicates an expected call of List +// List indicates an expected call of List. func (mr *MockPrivateLinkServicesClientMockRecorder) List(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockPrivateLinkServicesClient)(nil).List), arg0, arg1) } -// MockPublicIPAddressesClient is a mock of PublicIPAddressesClient interface +// MockPublicIPAddressesClient is a mock of PublicIPAddressesClient interface. type MockPublicIPAddressesClient struct { ctrl *gomock.Controller recorder *MockPublicIPAddressesClientMockRecorder } -// MockPublicIPAddressesClientMockRecorder is the mock recorder for MockPublicIPAddressesClient +// MockPublicIPAddressesClientMockRecorder is the mock recorder for MockPublicIPAddressesClient. type MockPublicIPAddressesClientMockRecorder struct { mock *MockPublicIPAddressesClient } -// NewMockPublicIPAddressesClient creates a new mock instance +// NewMockPublicIPAddressesClient creates a new mock instance. func NewMockPublicIPAddressesClient(ctrl *gomock.Controller) *MockPublicIPAddressesClient { mock := &MockPublicIPAddressesClient{ctrl: ctrl} mock.recorder = &MockPublicIPAddressesClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockPublicIPAddressesClient) EXPECT() *MockPublicIPAddressesClientMockRecorder { return m.recorder } -// DeleteAndWait mocks base method +// DeleteAndWait mocks base method. 
func (m *MockPublicIPAddressesClient) DeleteAndWait(arg0 context.Context, arg1, arg2 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeleteAndWait", arg0, arg1, arg2) @@ -280,13 +280,13 @@ func (m *MockPublicIPAddressesClient) DeleteAndWait(arg0 context.Context, arg1, return ret0 } -// DeleteAndWait indicates an expected call of DeleteAndWait +// DeleteAndWait indicates an expected call of DeleteAndWait. func (mr *MockPublicIPAddressesClientMockRecorder) DeleteAndWait(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAndWait", reflect.TypeOf((*MockPublicIPAddressesClient)(nil).DeleteAndWait), arg0, arg1, arg2) } -// Get mocks base method +// Get mocks base method. func (m *MockPublicIPAddressesClient) Get(arg0 context.Context, arg1, arg2, arg3 string) (network.PublicIPAddress, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3) @@ -295,13 +295,13 @@ func (m *MockPublicIPAddressesClient) Get(arg0 context.Context, arg1, arg2, arg3 return ret0, ret1 } -// Get indicates an expected call of Get +// Get indicates an expected call of Get. func (mr *MockPublicIPAddressesClientMockRecorder) Get(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockPublicIPAddressesClient)(nil).Get), arg0, arg1, arg2, arg3) } -// List mocks base method +// List mocks base method. func (m *MockPublicIPAddressesClient) List(arg0 context.Context, arg1 string) ([]network.PublicIPAddress, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "List", arg0, arg1) @@ -310,36 +310,36 @@ func (m *MockPublicIPAddressesClient) List(arg0 context.Context, arg1 string) ([ return ret0, ret1 } -// List indicates an expected call of List +// List indicates an expected call of List. func (mr *MockPublicIPAddressesClientMockRecorder) List(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockPublicIPAddressesClient)(nil).List), arg0, arg1) } -// MockRouteTablesClient is a mock of RouteTablesClient interface +// MockRouteTablesClient is a mock of RouteTablesClient interface. type MockRouteTablesClient struct { ctrl *gomock.Controller recorder *MockRouteTablesClientMockRecorder } -// MockRouteTablesClientMockRecorder is the mock recorder for MockRouteTablesClient +// MockRouteTablesClientMockRecorder is the mock recorder for MockRouteTablesClient. type MockRouteTablesClientMockRecorder struct { mock *MockRouteTablesClient } -// NewMockRouteTablesClient creates a new mock instance +// NewMockRouteTablesClient creates a new mock instance. func NewMockRouteTablesClient(ctrl *gomock.Controller) *MockRouteTablesClient { mock := &MockRouteTablesClient{ctrl: ctrl} mock.recorder = &MockRouteTablesClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockRouteTablesClient) EXPECT() *MockRouteTablesClientMockRecorder { return m.recorder } -// DeleteAndWait mocks base method +// DeleteAndWait mocks base method. 
func (m *MockRouteTablesClient) DeleteAndWait(arg0 context.Context, arg1, arg2 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeleteAndWait", arg0, arg1, arg2) @@ -347,13 +347,13 @@ func (m *MockRouteTablesClient) DeleteAndWait(arg0 context.Context, arg1, arg2 s return ret0 } -// DeleteAndWait indicates an expected call of DeleteAndWait +// DeleteAndWait indicates an expected call of DeleteAndWait. func (mr *MockRouteTablesClientMockRecorder) DeleteAndWait(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAndWait", reflect.TypeOf((*MockRouteTablesClient)(nil).DeleteAndWait), arg0, arg1, arg2) } -// Get mocks base method +// Get mocks base method. func (m *MockRouteTablesClient) Get(arg0 context.Context, arg1, arg2, arg3 string) (network.RouteTable, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3) @@ -362,36 +362,36 @@ func (m *MockRouteTablesClient) Get(arg0 context.Context, arg1, arg2, arg3 strin return ret0, ret1 } -// Get indicates an expected call of Get +// Get indicates an expected call of Get. func (mr *MockRouteTablesClientMockRecorder) Get(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockRouteTablesClient)(nil).Get), arg0, arg1, arg2, arg3) } -// MockSubnetsClient is a mock of SubnetsClient interface +// MockSubnetsClient is a mock of SubnetsClient interface. type MockSubnetsClient struct { ctrl *gomock.Controller recorder *MockSubnetsClientMockRecorder } -// MockSubnetsClientMockRecorder is the mock recorder for MockSubnetsClient +// MockSubnetsClientMockRecorder is the mock recorder for MockSubnetsClient. type MockSubnetsClientMockRecorder struct { mock *MockSubnetsClient } -// NewMockSubnetsClient creates a new mock instance +// NewMockSubnetsClient creates a new mock instance. func NewMockSubnetsClient(ctrl *gomock.Controller) *MockSubnetsClient { mock := &MockSubnetsClient{ctrl: ctrl} mock.recorder = &MockSubnetsClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockSubnetsClient) EXPECT() *MockSubnetsClientMockRecorder { return m.recorder } -// CreateOrUpdateAndWait mocks base method +// CreateOrUpdateAndWait mocks base method. func (m *MockSubnetsClient) CreateOrUpdateAndWait(arg0 context.Context, arg1, arg2, arg3 string, arg4 network.Subnet) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateOrUpdateAndWait", arg0, arg1, arg2, arg3, arg4) @@ -399,13 +399,13 @@ func (m *MockSubnetsClient) CreateOrUpdateAndWait(arg0 context.Context, arg1, ar return ret0 } -// CreateOrUpdateAndWait indicates an expected call of CreateOrUpdateAndWait +// CreateOrUpdateAndWait indicates an expected call of CreateOrUpdateAndWait. func (mr *MockSubnetsClientMockRecorder) CreateOrUpdateAndWait(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdateAndWait", reflect.TypeOf((*MockSubnetsClient)(nil).CreateOrUpdateAndWait), arg0, arg1, arg2, arg3, arg4) } -// DeleteAndWait mocks base method +// DeleteAndWait mocks base method. 
func (m *MockSubnetsClient) DeleteAndWait(arg0 context.Context, arg1, arg2, arg3 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeleteAndWait", arg0, arg1, arg2, arg3) @@ -413,13 +413,13 @@ func (m *MockSubnetsClient) DeleteAndWait(arg0 context.Context, arg1, arg2, arg3 return ret0 } -// DeleteAndWait indicates an expected call of DeleteAndWait +// DeleteAndWait indicates an expected call of DeleteAndWait. func (mr *MockSubnetsClientMockRecorder) DeleteAndWait(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAndWait", reflect.TypeOf((*MockSubnetsClient)(nil).DeleteAndWait), arg0, arg1, arg2, arg3) } -// Get mocks base method +// Get mocks base method. func (m *MockSubnetsClient) Get(arg0 context.Context, arg1, arg2, arg3, arg4 string) (network.Subnet, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3, arg4) @@ -428,36 +428,36 @@ func (m *MockSubnetsClient) Get(arg0 context.Context, arg1, arg2, arg3, arg4 str return ret0, ret1 } -// Get indicates an expected call of Get +// Get indicates an expected call of Get. func (mr *MockSubnetsClientMockRecorder) Get(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockSubnetsClient)(nil).Get), arg0, arg1, arg2, arg3, arg4) } -// MockVirtualNetworksClient is a mock of VirtualNetworksClient interface +// MockVirtualNetworksClient is a mock of VirtualNetworksClient interface. type MockVirtualNetworksClient struct { ctrl *gomock.Controller recorder *MockVirtualNetworksClientMockRecorder } -// MockVirtualNetworksClientMockRecorder is the mock recorder for MockVirtualNetworksClient +// MockVirtualNetworksClientMockRecorder is the mock recorder for MockVirtualNetworksClient. type MockVirtualNetworksClientMockRecorder struct { mock *MockVirtualNetworksClient } -// NewMockVirtualNetworksClient creates a new mock instance +// NewMockVirtualNetworksClient creates a new mock instance. func NewMockVirtualNetworksClient(ctrl *gomock.Controller) *MockVirtualNetworksClient { mock := &MockVirtualNetworksClient{ctrl: ctrl} mock.recorder = &MockVirtualNetworksClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockVirtualNetworksClient) EXPECT() *MockVirtualNetworksClientMockRecorder { return m.recorder } -// CreateOrUpdate mocks base method +// CreateOrUpdate mocks base method. func (m *MockVirtualNetworksClient) CreateOrUpdate(arg0 context.Context, arg1, arg2 string, arg3 network.VirtualNetwork) (network.VirtualNetworksCreateOrUpdateFuture, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateOrUpdate", arg0, arg1, arg2, arg3) @@ -466,13 +466,13 @@ func (m *MockVirtualNetworksClient) CreateOrUpdate(arg0 context.Context, arg1, a return ret0, ret1 } -// CreateOrUpdate indicates an expected call of CreateOrUpdate +// CreateOrUpdate indicates an expected call of CreateOrUpdate. func (mr *MockVirtualNetworksClientMockRecorder) CreateOrUpdate(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockVirtualNetworksClient)(nil).CreateOrUpdate), arg0, arg1, arg2, arg3) } -// Get mocks base method +// Get mocks base method. 
func (m *MockVirtualNetworksClient) Get(arg0 context.Context, arg1, arg2, arg3 string) (network.VirtualNetwork, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3) @@ -481,13 +481,13 @@ func (m *MockVirtualNetworksClient) Get(arg0 context.Context, arg1, arg2, arg3 s return ret0, ret1 } -// Get indicates an expected call of Get +// Get indicates an expected call of Get. func (mr *MockVirtualNetworksClientMockRecorder) Get(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockVirtualNetworksClient)(nil).Get), arg0, arg1, arg2, arg3) } -// List mocks base method +// List mocks base method. func (m *MockVirtualNetworksClient) List(arg0 context.Context, arg1 string) ([]network.VirtualNetwork, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "List", arg0, arg1) @@ -496,36 +496,36 @@ func (m *MockVirtualNetworksClient) List(arg0 context.Context, arg1 string) ([]n return ret0, ret1 } -// List indicates an expected call of List +// List indicates an expected call of List. func (mr *MockVirtualNetworksClientMockRecorder) List(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockVirtualNetworksClient)(nil).List), arg0, arg1) } -// MockSecurityGroupsClient is a mock of SecurityGroupsClient interface +// MockSecurityGroupsClient is a mock of SecurityGroupsClient interface. type MockSecurityGroupsClient struct { ctrl *gomock.Controller recorder *MockSecurityGroupsClientMockRecorder } -// MockSecurityGroupsClientMockRecorder is the mock recorder for MockSecurityGroupsClient +// MockSecurityGroupsClientMockRecorder is the mock recorder for MockSecurityGroupsClient. type MockSecurityGroupsClientMockRecorder struct { mock *MockSecurityGroupsClient } -// NewMockSecurityGroupsClient creates a new mock instance +// NewMockSecurityGroupsClient creates a new mock instance. func NewMockSecurityGroupsClient(ctrl *gomock.Controller) *MockSecurityGroupsClient { mock := &MockSecurityGroupsClient{ctrl: ctrl} mock.recorder = &MockSecurityGroupsClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockSecurityGroupsClient) EXPECT() *MockSecurityGroupsClientMockRecorder { return m.recorder } -// CreateOrUpdateAndWait mocks base method +// CreateOrUpdateAndWait mocks base method. func (m *MockSecurityGroupsClient) CreateOrUpdateAndWait(arg0 context.Context, arg1, arg2 string, arg3 network.SecurityGroup) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateOrUpdateAndWait", arg0, arg1, arg2, arg3) @@ -533,13 +533,13 @@ func (m *MockSecurityGroupsClient) CreateOrUpdateAndWait(arg0 context.Context, a return ret0 } -// CreateOrUpdateAndWait indicates an expected call of CreateOrUpdateAndWait +// CreateOrUpdateAndWait indicates an expected call of CreateOrUpdateAndWait. func (mr *MockSecurityGroupsClientMockRecorder) CreateOrUpdateAndWait(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdateAndWait", reflect.TypeOf((*MockSecurityGroupsClient)(nil).CreateOrUpdateAndWait), arg0, arg1, arg2, arg3) } -// Get mocks base method +// Get mocks base method. 
func (m *MockSecurityGroupsClient) Get(arg0 context.Context, arg1, arg2, arg3 string) (network.SecurityGroup, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3) @@ -548,13 +548,13 @@ func (m *MockSecurityGroupsClient) Get(arg0 context.Context, arg1, arg2, arg3 st return ret0, ret1 } -// Get indicates an expected call of Get +// Get indicates an expected call of Get. func (mr *MockSecurityGroupsClientMockRecorder) Get(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockSecurityGroupsClient)(nil).Get), arg0, arg1, arg2, arg3) } -// List mocks base method +// List mocks base method. func (m *MockSecurityGroupsClient) List(arg0 context.Context, arg1 string) ([]network.SecurityGroup, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "List", arg0, arg1) @@ -563,36 +563,36 @@ func (m *MockSecurityGroupsClient) List(arg0 context.Context, arg1 string) ([]ne return ret0, ret1 } -// List indicates an expected call of List +// List indicates an expected call of List. func (mr *MockSecurityGroupsClientMockRecorder) List(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockSecurityGroupsClient)(nil).List), arg0, arg1) } -// MockVirtualNetworkPeeringsClient is a mock of VirtualNetworkPeeringsClient interface +// MockVirtualNetworkPeeringsClient is a mock of VirtualNetworkPeeringsClient interface. type MockVirtualNetworkPeeringsClient struct { ctrl *gomock.Controller recorder *MockVirtualNetworkPeeringsClientMockRecorder } -// MockVirtualNetworkPeeringsClientMockRecorder is the mock recorder for MockVirtualNetworkPeeringsClient +// MockVirtualNetworkPeeringsClientMockRecorder is the mock recorder for MockVirtualNetworkPeeringsClient. type MockVirtualNetworkPeeringsClientMockRecorder struct { mock *MockVirtualNetworkPeeringsClient } -// NewMockVirtualNetworkPeeringsClient creates a new mock instance +// NewMockVirtualNetworkPeeringsClient creates a new mock instance. func NewMockVirtualNetworkPeeringsClient(ctrl *gomock.Controller) *MockVirtualNetworkPeeringsClient { mock := &MockVirtualNetworkPeeringsClient{ctrl: ctrl} mock.recorder = &MockVirtualNetworkPeeringsClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockVirtualNetworkPeeringsClient) EXPECT() *MockVirtualNetworkPeeringsClientMockRecorder { return m.recorder } -// CreateOrUpdate mocks base method +// CreateOrUpdate mocks base method. func (m *MockVirtualNetworkPeeringsClient) CreateOrUpdate(arg0 context.Context, arg1, arg2, arg3 string, arg4 network.VirtualNetworkPeering) (network.VirtualNetworkPeeringsCreateOrUpdateFuture, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateOrUpdate", arg0, arg1, arg2, arg3, arg4) @@ -601,13 +601,13 @@ func (m *MockVirtualNetworkPeeringsClient) CreateOrUpdate(arg0 context.Context, return ret0, ret1 } -// CreateOrUpdate indicates an expected call of CreateOrUpdate +// CreateOrUpdate indicates an expected call of CreateOrUpdate. 
func (mr *MockVirtualNetworkPeeringsClientMockRecorder) CreateOrUpdate(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockVirtualNetworkPeeringsClient)(nil).CreateOrUpdate), arg0, arg1, arg2, arg3, arg4) } -// CreateOrUpdateAndWait mocks base method +// CreateOrUpdateAndWait mocks base method. func (m *MockVirtualNetworkPeeringsClient) CreateOrUpdateAndWait(arg0 context.Context, arg1, arg2, arg3 string, arg4 network.VirtualNetworkPeering) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateOrUpdateAndWait", arg0, arg1, arg2, arg3, arg4) @@ -615,13 +615,13 @@ func (m *MockVirtualNetworkPeeringsClient) CreateOrUpdateAndWait(arg0 context.Co return ret0 } -// CreateOrUpdateAndWait indicates an expected call of CreateOrUpdateAndWait +// CreateOrUpdateAndWait indicates an expected call of CreateOrUpdateAndWait. func (mr *MockVirtualNetworkPeeringsClientMockRecorder) CreateOrUpdateAndWait(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdateAndWait", reflect.TypeOf((*MockVirtualNetworkPeeringsClient)(nil).CreateOrUpdateAndWait), arg0, arg1, arg2, arg3, arg4) } -// Delete mocks base method +// Delete mocks base method. func (m *MockVirtualNetworkPeeringsClient) Delete(arg0 context.Context, arg1, arg2, arg3 string) (network.VirtualNetworkPeeringsDeleteFuture, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Delete", arg0, arg1, arg2, arg3) @@ -630,13 +630,13 @@ func (m *MockVirtualNetworkPeeringsClient) Delete(arg0 context.Context, arg1, ar return ret0, ret1 } -// Delete indicates an expected call of Delete +// Delete indicates an expected call of Delete. func (mr *MockVirtualNetworkPeeringsClientMockRecorder) Delete(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockVirtualNetworkPeeringsClient)(nil).Delete), arg0, arg1, arg2, arg3) } -// DeleteAndWait mocks base method +// DeleteAndWait mocks base method. func (m *MockVirtualNetworkPeeringsClient) DeleteAndWait(arg0 context.Context, arg1, arg2, arg3 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeleteAndWait", arg0, arg1, arg2, arg3) @@ -644,13 +644,13 @@ func (m *MockVirtualNetworkPeeringsClient) DeleteAndWait(arg0 context.Context, a return ret0 } -// DeleteAndWait indicates an expected call of DeleteAndWait +// DeleteAndWait indicates an expected call of DeleteAndWait. func (mr *MockVirtualNetworkPeeringsClientMockRecorder) DeleteAndWait(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAndWait", reflect.TypeOf((*MockVirtualNetworkPeeringsClient)(nil).DeleteAndWait), arg0, arg1, arg2, arg3) } -// Get mocks base method +// Get mocks base method. func (m *MockVirtualNetworkPeeringsClient) Get(arg0 context.Context, arg1, arg2, arg3 string) (network.VirtualNetworkPeering, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3) @@ -659,7 +659,7 @@ func (m *MockVirtualNetworkPeeringsClient) Get(arg0 context.Context, arg1, arg2, return ret0, ret1 } -// Get indicates an expected call of Get +// Get indicates an expected call of Get. 
func (mr *MockVirtualNetworkPeeringsClientMockRecorder) Get(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockVirtualNetworkPeeringsClient)(nil).Get), arg0, arg1, arg2, arg3) diff --git a/pkg/util/mocks/azureclient/mgmt/privatedns/privatedns.go b/pkg/util/mocks/azureclient/mgmt/privatedns/privatedns.go index c7b65a85ef7..475dfe5786b 100644 --- a/pkg/util/mocks/azureclient/mgmt/privatedns/privatedns.go +++ b/pkg/util/mocks/azureclient/mgmt/privatedns/privatedns.go @@ -12,30 +12,30 @@ import ( gomock "github.com/golang/mock/gomock" ) -// MockPrivateZonesClient is a mock of PrivateZonesClient interface +// MockPrivateZonesClient is a mock of PrivateZonesClient interface. type MockPrivateZonesClient struct { ctrl *gomock.Controller recorder *MockPrivateZonesClientMockRecorder } -// MockPrivateZonesClientMockRecorder is the mock recorder for MockPrivateZonesClient +// MockPrivateZonesClientMockRecorder is the mock recorder for MockPrivateZonesClient. type MockPrivateZonesClientMockRecorder struct { mock *MockPrivateZonesClient } -// NewMockPrivateZonesClient creates a new mock instance +// NewMockPrivateZonesClient creates a new mock instance. func NewMockPrivateZonesClient(ctrl *gomock.Controller) *MockPrivateZonesClient { mock := &MockPrivateZonesClient{ctrl: ctrl} mock.recorder = &MockPrivateZonesClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockPrivateZonesClient) EXPECT() *MockPrivateZonesClientMockRecorder { return m.recorder } -// DeleteAndWait mocks base method +// DeleteAndWait mocks base method. func (m *MockPrivateZonesClient) DeleteAndWait(arg0 context.Context, arg1, arg2, arg3 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeleteAndWait", arg0, arg1, arg2, arg3) @@ -43,13 +43,13 @@ func (m *MockPrivateZonesClient) DeleteAndWait(arg0 context.Context, arg1, arg2, return ret0 } -// DeleteAndWait indicates an expected call of DeleteAndWait +// DeleteAndWait indicates an expected call of DeleteAndWait. func (mr *MockPrivateZonesClientMockRecorder) DeleteAndWait(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAndWait", reflect.TypeOf((*MockPrivateZonesClient)(nil).DeleteAndWait), arg0, arg1, arg2, arg3) } -// ListByResourceGroup mocks base method +// ListByResourceGroup mocks base method. func (m *MockPrivateZonesClient) ListByResourceGroup(arg0 context.Context, arg1 string, arg2 *int32) ([]privatedns.PrivateZone, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ListByResourceGroup", arg0, arg1, arg2) @@ -58,36 +58,36 @@ func (m *MockPrivateZonesClient) ListByResourceGroup(arg0 context.Context, arg1 return ret0, ret1 } -// ListByResourceGroup indicates an expected call of ListByResourceGroup +// ListByResourceGroup indicates an expected call of ListByResourceGroup. func (mr *MockPrivateZonesClientMockRecorder) ListByResourceGroup(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByResourceGroup", reflect.TypeOf((*MockPrivateZonesClient)(nil).ListByResourceGroup), arg0, arg1, arg2) } -// MockVirtualNetworkLinksClient is a mock of VirtualNetworkLinksClient interface +// MockVirtualNetworkLinksClient is a mock of VirtualNetworkLinksClient interface. 
type MockVirtualNetworkLinksClient struct { ctrl *gomock.Controller recorder *MockVirtualNetworkLinksClientMockRecorder } -// MockVirtualNetworkLinksClientMockRecorder is the mock recorder for MockVirtualNetworkLinksClient +// MockVirtualNetworkLinksClientMockRecorder is the mock recorder for MockVirtualNetworkLinksClient. type MockVirtualNetworkLinksClientMockRecorder struct { mock *MockVirtualNetworkLinksClient } -// NewMockVirtualNetworkLinksClient creates a new mock instance +// NewMockVirtualNetworkLinksClient creates a new mock instance. func NewMockVirtualNetworkLinksClient(ctrl *gomock.Controller) *MockVirtualNetworkLinksClient { mock := &MockVirtualNetworkLinksClient{ctrl: ctrl} mock.recorder = &MockVirtualNetworkLinksClientMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockVirtualNetworkLinksClient) EXPECT() *MockVirtualNetworkLinksClientMockRecorder { return m.recorder } -// DeleteAndWait mocks base method +// DeleteAndWait mocks base method. func (m *MockVirtualNetworkLinksClient) DeleteAndWait(arg0 context.Context, arg1, arg2, arg3, arg4 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeleteAndWait", arg0, arg1, arg2, arg3, arg4) @@ -95,13 +95,13 @@ func (m *MockVirtualNetworkLinksClient) DeleteAndWait(arg0 context.Context, arg1 return ret0 } -// DeleteAndWait indicates an expected call of DeleteAndWait +// DeleteAndWait indicates an expected call of DeleteAndWait. func (mr *MockVirtualNetworkLinksClientMockRecorder) DeleteAndWait(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAndWait", reflect.TypeOf((*MockVirtualNetworkLinksClient)(nil).DeleteAndWait), arg0, arg1, arg2, arg3, arg4) } -// List mocks base method +// List mocks base method. func (m *MockVirtualNetworkLinksClient) List(arg0 context.Context, arg1, arg2 string, arg3 *int32) ([]privatedns.VirtualNetworkLink, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "List", arg0, arg1, arg2, arg3) @@ -110,7 +110,7 @@ func (m *MockVirtualNetworkLinksClient) List(arg0 context.Context, arg1, arg2 st return ret0, ret1 } -// List indicates an expected call of List +// List indicates an expected call of List. func (mr *MockVirtualNetworkLinksClientMockRecorder) List(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockVirtualNetworkLinksClient)(nil).List), arg0, arg1, arg2, arg3) diff --git a/pkg/util/mocks/billing/billing.go b/pkg/util/mocks/billing/billing.go index 7cd1be18c81..ee570a80d63 100644 --- a/pkg/util/mocks/billing/billing.go +++ b/pkg/util/mocks/billing/billing.go @@ -13,30 +13,30 @@ import ( api "github.com/Azure/ARO-RP/pkg/api" ) -// MockManager is a mock of Manager interface +// MockManager is a mock of Manager interface. type MockManager struct { ctrl *gomock.Controller recorder *MockManagerMockRecorder } -// MockManagerMockRecorder is the mock recorder for MockManager +// MockManagerMockRecorder is the mock recorder for MockManager. type MockManagerMockRecorder struct { mock *MockManager } -// NewMockManager creates a new mock instance +// NewMockManager creates a new mock instance. 
func NewMockManager(ctrl *gomock.Controller) *MockManager { mock := &MockManager{ctrl: ctrl} mock.recorder = &MockManagerMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockManager) EXPECT() *MockManagerMockRecorder { return m.recorder } -// Delete mocks base method +// Delete mocks base method. func (m *MockManager) Delete(arg0 context.Context, arg1 *api.OpenShiftClusterDocument) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Delete", arg0, arg1) @@ -44,13 +44,13 @@ func (m *MockManager) Delete(arg0 context.Context, arg1 *api.OpenShiftClusterDoc return ret0 } -// Delete indicates an expected call of Delete +// Delete indicates an expected call of Delete. func (mr *MockManagerMockRecorder) Delete(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockManager)(nil).Delete), arg0, arg1) } -// Ensure mocks base method +// Ensure mocks base method. func (m *MockManager) Ensure(arg0 context.Context, arg1 *api.OpenShiftClusterDocument, arg2 *api.SubscriptionDocument) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Ensure", arg0, arg1, arg2) @@ -58,7 +58,7 @@ func (m *MockManager) Ensure(arg0 context.Context, arg1 *api.OpenShiftClusterDoc return ret0 } -// Ensure indicates an expected call of Ensure +// Ensure indicates an expected call of Ensure. func (mr *MockManagerMockRecorder) Ensure(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ensure", reflect.TypeOf((*MockManager)(nil).Ensure), arg0, arg1, arg2) diff --git a/pkg/util/mocks/cluster/cluster.go b/pkg/util/mocks/cluster/cluster.go index 8fa3e1a384e..9acdc856c21 100644 --- a/pkg/util/mocks/cluster/cluster.go +++ b/pkg/util/mocks/cluster/cluster.go @@ -11,30 +11,30 @@ import ( gomock "github.com/golang/mock/gomock" ) -// MockInterface is a mock of Interface interface +// MockInterface is a mock of Interface interface. type MockInterface struct { ctrl *gomock.Controller recorder *MockInterfaceMockRecorder } -// MockInterfaceMockRecorder is the mock recorder for MockInterface +// MockInterfaceMockRecorder is the mock recorder for MockInterface. type MockInterfaceMockRecorder struct { mock *MockInterface } -// NewMockInterface creates a new mock instance +// NewMockInterface creates a new mock instance. func NewMockInterface(ctrl *gomock.Controller) *MockInterface { mock := &MockInterface{ctrl: ctrl} mock.recorder = &MockInterfaceMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder { return m.recorder } -// AdminUpdate mocks base method +// AdminUpdate mocks base method. func (m *MockInterface) AdminUpdate(arg0 context.Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "AdminUpdate", arg0) @@ -42,13 +42,13 @@ func (m *MockInterface) AdminUpdate(arg0 context.Context) error { return ret0 } -// AdminUpdate indicates an expected call of AdminUpdate +// AdminUpdate indicates an expected call of AdminUpdate. 
func (mr *MockInterfaceMockRecorder) AdminUpdate(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AdminUpdate", reflect.TypeOf((*MockInterface)(nil).AdminUpdate), arg0) } -// Delete mocks base method +// Delete mocks base method. func (m *MockInterface) Delete(arg0 context.Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Delete", arg0) @@ -56,13 +56,13 @@ func (m *MockInterface) Delete(arg0 context.Context) error { return ret0 } -// Delete indicates an expected call of Delete +// Delete indicates an expected call of Delete. func (mr *MockInterfaceMockRecorder) Delete(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), arg0) } -// Install mocks base method +// Install mocks base method. func (m *MockInterface) Install(arg0 context.Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Install", arg0) @@ -70,13 +70,13 @@ func (m *MockInterface) Install(arg0 context.Context) error { return ret0 } -// Install indicates an expected call of Install +// Install indicates an expected call of Install. func (mr *MockInterfaceMockRecorder) Install(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Install", reflect.TypeOf((*MockInterface)(nil).Install), arg0) } -// Update mocks base method +// Update mocks base method. func (m *MockInterface) Update(arg0 context.Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Update", arg0) @@ -84,7 +84,7 @@ func (m *MockInterface) Update(arg0 context.Context) error { return ret0 } -// Update indicates an expected call of Update +// Update indicates an expected call of Update. func (mr *MockInterfaceMockRecorder) Update(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockInterface)(nil).Update), arg0) diff --git a/pkg/util/mocks/dns/dns.go b/pkg/util/mocks/dns/dns.go index 2c0fb05134e..785b86f4317 100644 --- a/pkg/util/mocks/dns/dns.go +++ b/pkg/util/mocks/dns/dns.go @@ -13,30 +13,30 @@ import ( api "github.com/Azure/ARO-RP/pkg/api" ) -// MockManager is a mock of Manager interface +// MockManager is a mock of Manager interface. type MockManager struct { ctrl *gomock.Controller recorder *MockManagerMockRecorder } -// MockManagerMockRecorder is the mock recorder for MockManager +// MockManagerMockRecorder is the mock recorder for MockManager. type MockManagerMockRecorder struct { mock *MockManager } -// NewMockManager creates a new mock instance +// NewMockManager creates a new mock instance. func NewMockManager(ctrl *gomock.Controller) *MockManager { mock := &MockManager{ctrl: ctrl} mock.recorder = &MockManagerMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockManager) EXPECT() *MockManagerMockRecorder { return m.recorder } -// Create mocks base method +// Create mocks base method. func (m *MockManager) Create(arg0 context.Context, arg1 *api.OpenShiftCluster) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Create", arg0, arg1) @@ -44,13 +44,13 @@ func (m *MockManager) Create(arg0 context.Context, arg1 *api.OpenShiftCluster) e return ret0 } -// Create indicates an expected call of Create +// Create indicates an expected call of Create. 
func (mr *MockManagerMockRecorder) Create(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockManager)(nil).Create), arg0, arg1) } -// CreateOrUpdateRouter mocks base method +// CreateOrUpdateRouter mocks base method. func (m *MockManager) CreateOrUpdateRouter(arg0 context.Context, arg1 *api.OpenShiftCluster, arg2 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateOrUpdateRouter", arg0, arg1, arg2) @@ -58,13 +58,13 @@ func (m *MockManager) CreateOrUpdateRouter(arg0 context.Context, arg1 *api.OpenS return ret0 } -// CreateOrUpdateRouter indicates an expected call of CreateOrUpdateRouter +// CreateOrUpdateRouter indicates an expected call of CreateOrUpdateRouter. func (mr *MockManagerMockRecorder) CreateOrUpdateRouter(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdateRouter", reflect.TypeOf((*MockManager)(nil).CreateOrUpdateRouter), arg0, arg1, arg2) } -// Delete mocks base method +// Delete mocks base method. func (m *MockManager) Delete(arg0 context.Context, arg1 *api.OpenShiftCluster) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Delete", arg0, arg1) @@ -72,13 +72,13 @@ func (m *MockManager) Delete(arg0 context.Context, arg1 *api.OpenShiftCluster) e return ret0 } -// Delete indicates an expected call of Delete +// Delete indicates an expected call of Delete. func (mr *MockManagerMockRecorder) Delete(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockManager)(nil).Delete), arg0, arg1) } -// Update mocks base method +// Update mocks base method. func (m *MockManager) Update(arg0 context.Context, arg1 *api.OpenShiftCluster, arg2 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Update", arg0, arg1, arg2) @@ -86,7 +86,7 @@ func (m *MockManager) Update(arg0 context.Context, arg1 *api.OpenShiftCluster, a return ret0 } -// Update indicates an expected call of Update +// Update indicates an expected call of Update. func (mr *MockManagerMockRecorder) Update(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockManager)(nil).Update), arg0, arg1, arg2) diff --git a/pkg/util/mocks/dynamichelper/dynamichelper.go b/pkg/util/mocks/dynamichelper/dynamichelper.go index d9411f60a60..8ccc696bdbb 100644 --- a/pkg/util/mocks/dynamichelper/dynamichelper.go +++ b/pkg/util/mocks/dynamichelper/dynamichelper.go @@ -12,30 +12,30 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) -// MockInterface is a mock of Interface interface +// MockInterface is a mock of Interface interface. type MockInterface struct { ctrl *gomock.Controller recorder *MockInterfaceMockRecorder } -// MockInterfaceMockRecorder is the mock recorder for MockInterface +// MockInterfaceMockRecorder is the mock recorder for MockInterface. type MockInterfaceMockRecorder struct { mock *MockInterface } -// NewMockInterface creates a new mock instance +// NewMockInterface creates a new mock instance. func NewMockInterface(ctrl *gomock.Controller) *MockInterface { mock := &MockInterface{ctrl: ctrl} mock.recorder = &MockInterfaceMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. 
func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder { return m.recorder } -// Ensure mocks base method +// Ensure mocks base method. func (m *MockInterface) Ensure(arg0 context.Context, arg1 ...runtime.Object) error { m.ctrl.T.Helper() varargs := []interface{}{arg0} @@ -47,14 +47,14 @@ func (m *MockInterface) Ensure(arg0 context.Context, arg1 ...runtime.Object) err return ret0 } -// Ensure indicates an expected call of Ensure +// Ensure indicates an expected call of Ensure. func (mr *MockInterfaceMockRecorder) Ensure(arg0 interface{}, arg1 ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() varargs := append([]interface{}{arg0}, arg1...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ensure", reflect.TypeOf((*MockInterface)(nil).Ensure), varargs...) } -// EnsureDeleted mocks base method +// EnsureDeleted mocks base method. func (m *MockInterface) EnsureDeleted(arg0 context.Context, arg1, arg2, arg3 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "EnsureDeleted", arg0, arg1, arg2, arg3) @@ -62,13 +62,13 @@ func (m *MockInterface) EnsureDeleted(arg0 context.Context, arg1, arg2, arg3 str return ret0 } -// EnsureDeleted indicates an expected call of EnsureDeleted +// EnsureDeleted indicates an expected call of EnsureDeleted. func (mr *MockInterfaceMockRecorder) EnsureDeleted(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureDeleted", reflect.TypeOf((*MockInterface)(nil).EnsureDeleted), arg0, arg1, arg2, arg3) } -// Refresh mocks base method +// Refresh mocks base method. func (m *MockInterface) Refresh() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Refresh") @@ -76,7 +76,7 @@ func (m *MockInterface) Refresh() error { return ret0 } -// Refresh indicates an expected call of Refresh +// Refresh indicates an expected call of Refresh. func (mr *MockInterfaceMockRecorder) Refresh() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Refresh", reflect.TypeOf((*MockInterface)(nil).Refresh)) diff --git a/pkg/util/mocks/env/env.go b/pkg/util/mocks/env/env.go index 427b3ee3d3f..c2fcc831865 100644 --- a/pkg/util/mocks/env/env.go +++ b/pkg/util/mocks/env/env.go @@ -21,30 +21,30 @@ import ( refreshable "github.com/Azure/ARO-RP/pkg/util/refreshable" ) -// MockCore is a mock of Core interface +// MockCore is a mock of Core interface. type MockCore struct { ctrl *gomock.Controller recorder *MockCoreMockRecorder } -// MockCoreMockRecorder is the mock recorder for MockCore +// MockCoreMockRecorder is the mock recorder for MockCore. type MockCoreMockRecorder struct { mock *MockCore } -// NewMockCore creates a new mock instance +// NewMockCore creates a new mock instance. func NewMockCore(ctrl *gomock.Controller) *MockCore { mock := &MockCore{ctrl: ctrl} mock.recorder = &MockCoreMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockCore) EXPECT() *MockCoreMockRecorder { return m.recorder } -// Environment mocks base method +// Environment mocks base method. func (m *MockCore) Environment() *azure.Environment { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Environment") @@ -52,13 +52,13 @@ func (m *MockCore) Environment() *azure.Environment { return ret0 } -// Environment indicates an expected call of Environment +// Environment indicates an expected call of Environment. 
func (mr *MockCoreMockRecorder) Environment() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Environment", reflect.TypeOf((*MockCore)(nil).Environment)) } -// Hostname mocks base method +// Hostname mocks base method. func (m *MockCore) Hostname() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Hostname") @@ -66,13 +66,13 @@ func (m *MockCore) Hostname() string { return ret0 } -// Hostname indicates an expected call of Hostname +// Hostname indicates an expected call of Hostname. func (mr *MockCoreMockRecorder) Hostname() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Hostname", reflect.TypeOf((*MockCore)(nil).Hostname)) } -// IsDevelopmentMode mocks base method +// IsDevelopmentMode mocks base method. func (m *MockCore) IsDevelopmentMode() bool { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "IsDevelopmentMode") @@ -80,13 +80,13 @@ func (m *MockCore) IsDevelopmentMode() bool { return ret0 } -// IsDevelopmentMode indicates an expected call of IsDevelopmentMode +// IsDevelopmentMode indicates an expected call of IsDevelopmentMode. func (mr *MockCoreMockRecorder) IsDevelopmentMode() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsDevelopmentMode", reflect.TypeOf((*MockCore)(nil).IsDevelopmentMode)) } -// Location mocks base method +// Location mocks base method. func (m *MockCore) Location() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Location") @@ -94,13 +94,13 @@ func (m *MockCore) Location() string { return ret0 } -// Location indicates an expected call of Location +// Location indicates an expected call of Location. func (mr *MockCoreMockRecorder) Location() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Location", reflect.TypeOf((*MockCore)(nil).Location)) } -// NewRPAuthorizer mocks base method +// NewRPAuthorizer mocks base method. func (m *MockCore) NewRPAuthorizer(arg0 string) (autorest.Authorizer, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewRPAuthorizer", arg0) @@ -109,13 +109,13 @@ func (m *MockCore) NewRPAuthorizer(arg0 string) (autorest.Authorizer, error) { return ret0, ret1 } -// NewRPAuthorizer indicates an expected call of NewRPAuthorizer +// NewRPAuthorizer indicates an expected call of NewRPAuthorizer. func (mr *MockCoreMockRecorder) NewRPAuthorizer(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewRPAuthorizer", reflect.TypeOf((*MockCore)(nil).NewRPAuthorizer), arg0) } -// ResourceGroup mocks base method +// ResourceGroup mocks base method. func (m *MockCore) ResourceGroup() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ResourceGroup") @@ -123,13 +123,13 @@ func (m *MockCore) ResourceGroup() string { return ret0 } -// ResourceGroup indicates an expected call of ResourceGroup +// ResourceGroup indicates an expected call of ResourceGroup. func (mr *MockCoreMockRecorder) ResourceGroup() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResourceGroup", reflect.TypeOf((*MockCore)(nil).ResourceGroup)) } -// SubscriptionID mocks base method +// SubscriptionID mocks base method. 
func (m *MockCore) SubscriptionID() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SubscriptionID") @@ -137,13 +137,13 @@ func (m *MockCore) SubscriptionID() string { return ret0 } -// SubscriptionID indicates an expected call of SubscriptionID +// SubscriptionID indicates an expected call of SubscriptionID. func (mr *MockCoreMockRecorder) SubscriptionID() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscriptionID", reflect.TypeOf((*MockCore)(nil).SubscriptionID)) } -// TenantID mocks base method +// TenantID mocks base method. func (m *MockCore) TenantID() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "TenantID") @@ -151,36 +151,36 @@ func (m *MockCore) TenantID() string { return ret0 } -// TenantID indicates an expected call of TenantID +// TenantID indicates an expected call of TenantID. func (mr *MockCoreMockRecorder) TenantID() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantID", reflect.TypeOf((*MockCore)(nil).TenantID)) } -// MockInterface is a mock of Interface interface +// MockInterface is a mock of Interface interface. type MockInterface struct { ctrl *gomock.Controller recorder *MockInterfaceMockRecorder } -// MockInterfaceMockRecorder is the mock recorder for MockInterface +// MockInterfaceMockRecorder is the mock recorder for MockInterface. type MockInterfaceMockRecorder struct { mock *MockInterface } -// NewMockInterface creates a new mock instance +// NewMockInterface creates a new mock instance. func NewMockInterface(ctrl *gomock.Controller) *MockInterface { mock := &MockInterface{ctrl: ctrl} mock.recorder = &MockInterfaceMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder { return m.recorder } -// ACRDomain mocks base method +// ACRDomain mocks base method. func (m *MockInterface) ACRDomain() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ACRDomain") @@ -188,13 +188,13 @@ func (m *MockInterface) ACRDomain() string { return ret0 } -// ACRDomain indicates an expected call of ACRDomain +// ACRDomain indicates an expected call of ACRDomain. func (mr *MockInterfaceMockRecorder) ACRDomain() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ACRDomain", reflect.TypeOf((*MockInterface)(nil).ACRDomain)) } -// ACRResourceID mocks base method +// ACRResourceID mocks base method. func (m *MockInterface) ACRResourceID() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ACRResourceID") @@ -202,13 +202,13 @@ func (m *MockInterface) ACRResourceID() string { return ret0 } -// ACRResourceID indicates an expected call of ACRResourceID +// ACRResourceID indicates an expected call of ACRResourceID. func (mr *MockInterfaceMockRecorder) ACRResourceID() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ACRResourceID", reflect.TypeOf((*MockInterface)(nil).ACRResourceID)) } -// AROOperatorImage mocks base method +// AROOperatorImage mocks base method. func (m *MockInterface) AROOperatorImage() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "AROOperatorImage") @@ -216,13 +216,13 @@ func (m *MockInterface) AROOperatorImage() string { return ret0 } -// AROOperatorImage indicates an expected call of AROOperatorImage +// AROOperatorImage indicates an expected call of AROOperatorImage. 
func (mr *MockInterfaceMockRecorder) AROOperatorImage() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AROOperatorImage", reflect.TypeOf((*MockInterface)(nil).AROOperatorImage)) } -// AdminClientAuthorizer mocks base method +// AdminClientAuthorizer mocks base method. func (m *MockInterface) AdminClientAuthorizer() clientauthorizer.ClientAuthorizer { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "AdminClientAuthorizer") @@ -230,13 +230,13 @@ func (m *MockInterface) AdminClientAuthorizer() clientauthorizer.ClientAuthorize return ret0 } -// AdminClientAuthorizer indicates an expected call of AdminClientAuthorizer +// AdminClientAuthorizer indicates an expected call of AdminClientAuthorizer. func (mr *MockInterfaceMockRecorder) AdminClientAuthorizer() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AdminClientAuthorizer", reflect.TypeOf((*MockInterface)(nil).AdminClientAuthorizer)) } -// ArmClientAuthorizer mocks base method +// ArmClientAuthorizer mocks base method. func (m *MockInterface) ArmClientAuthorizer() clientauthorizer.ClientAuthorizer { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ArmClientAuthorizer") @@ -244,13 +244,13 @@ func (m *MockInterface) ArmClientAuthorizer() clientauthorizer.ClientAuthorizer return ret0 } -// ArmClientAuthorizer indicates an expected call of ArmClientAuthorizer +// ArmClientAuthorizer indicates an expected call of ArmClientAuthorizer. func (mr *MockInterfaceMockRecorder) ArmClientAuthorizer() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ArmClientAuthorizer", reflect.TypeOf((*MockInterface)(nil).ArmClientAuthorizer)) } -// ClusterGenevaLoggingConfigVersion mocks base method +// ClusterGenevaLoggingConfigVersion mocks base method. func (m *MockInterface) ClusterGenevaLoggingConfigVersion() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClusterGenevaLoggingConfigVersion") @@ -258,13 +258,13 @@ func (m *MockInterface) ClusterGenevaLoggingConfigVersion() string { return ret0 } -// ClusterGenevaLoggingConfigVersion indicates an expected call of ClusterGenevaLoggingConfigVersion +// ClusterGenevaLoggingConfigVersion indicates an expected call of ClusterGenevaLoggingConfigVersion. func (mr *MockInterfaceMockRecorder) ClusterGenevaLoggingConfigVersion() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterGenevaLoggingConfigVersion", reflect.TypeOf((*MockInterface)(nil).ClusterGenevaLoggingConfigVersion)) } -// ClusterGenevaLoggingEnvironment mocks base method +// ClusterGenevaLoggingEnvironment mocks base method. func (m *MockInterface) ClusterGenevaLoggingEnvironment() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClusterGenevaLoggingEnvironment") @@ -272,13 +272,13 @@ func (m *MockInterface) ClusterGenevaLoggingEnvironment() string { return ret0 } -// ClusterGenevaLoggingEnvironment indicates an expected call of ClusterGenevaLoggingEnvironment +// ClusterGenevaLoggingEnvironment indicates an expected call of ClusterGenevaLoggingEnvironment. func (mr *MockInterfaceMockRecorder) ClusterGenevaLoggingEnvironment() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterGenevaLoggingEnvironment", reflect.TypeOf((*MockInterface)(nil).ClusterGenevaLoggingEnvironment)) } -// ClusterGenevaLoggingSecret mocks base method +// ClusterGenevaLoggingSecret mocks base method. 
func (m *MockInterface) ClusterGenevaLoggingSecret() (*rsa.PrivateKey, *x509.Certificate) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClusterGenevaLoggingSecret") @@ -287,13 +287,13 @@ func (m *MockInterface) ClusterGenevaLoggingSecret() (*rsa.PrivateKey, *x509.Cer return ret0, ret1 } -// ClusterGenevaLoggingSecret indicates an expected call of ClusterGenevaLoggingSecret +// ClusterGenevaLoggingSecret indicates an expected call of ClusterGenevaLoggingSecret. func (mr *MockInterfaceMockRecorder) ClusterGenevaLoggingSecret() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterGenevaLoggingSecret", reflect.TypeOf((*MockInterface)(nil).ClusterGenevaLoggingSecret)) } -// ClusterKeyvault mocks base method +// ClusterKeyvault mocks base method. func (m *MockInterface) ClusterKeyvault() keyvault.Manager { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClusterKeyvault") @@ -301,13 +301,13 @@ func (m *MockInterface) ClusterKeyvault() keyvault.Manager { return ret0 } -// ClusterKeyvault indicates an expected call of ClusterKeyvault +// ClusterKeyvault indicates an expected call of ClusterKeyvault. func (mr *MockInterfaceMockRecorder) ClusterKeyvault() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterKeyvault", reflect.TypeOf((*MockInterface)(nil).ClusterKeyvault)) } -// DialContext mocks base method +// DialContext mocks base method. func (m *MockInterface) DialContext(arg0 context.Context, arg1, arg2 string) (net.Conn, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DialContext", arg0, arg1, arg2) @@ -316,13 +316,13 @@ func (m *MockInterface) DialContext(arg0 context.Context, arg1, arg2 string) (ne return ret0, ret1 } -// DialContext indicates an expected call of DialContext +// DialContext indicates an expected call of DialContext. func (mr *MockInterfaceMockRecorder) DialContext(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DialContext", reflect.TypeOf((*MockInterface)(nil).DialContext), arg0, arg1, arg2) } -// Domain mocks base method +// Domain mocks base method. func (m *MockInterface) Domain() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Domain") @@ -330,13 +330,13 @@ func (m *MockInterface) Domain() string { return ret0 } -// Domain indicates an expected call of Domain +// Domain indicates an expected call of Domain. func (mr *MockInterfaceMockRecorder) Domain() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Domain", reflect.TypeOf((*MockInterface)(nil).Domain)) } -// EnsureARMResourceGroupRoleAssignment mocks base method +// EnsureARMResourceGroupRoleAssignment mocks base method. func (m *MockInterface) EnsureARMResourceGroupRoleAssignment(arg0 context.Context, arg1 refreshable.Authorizer, arg2 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "EnsureARMResourceGroupRoleAssignment", arg0, arg1, arg2) @@ -344,13 +344,13 @@ func (m *MockInterface) EnsureARMResourceGroupRoleAssignment(arg0 context.Contex return ret0 } -// EnsureARMResourceGroupRoleAssignment indicates an expected call of EnsureARMResourceGroupRoleAssignment +// EnsureARMResourceGroupRoleAssignment indicates an expected call of EnsureARMResourceGroupRoleAssignment. 
func (mr *MockInterfaceMockRecorder) EnsureARMResourceGroupRoleAssignment(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureARMResourceGroupRoleAssignment", reflect.TypeOf((*MockInterface)(nil).EnsureARMResourceGroupRoleAssignment), arg0, arg1, arg2) } -// Environment mocks base method +// Environment mocks base method. func (m *MockInterface) Environment() *azure.Environment { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Environment") @@ -358,13 +358,13 @@ func (m *MockInterface) Environment() *azure.Environment { return ret0 } -// Environment indicates an expected call of Environment +// Environment indicates an expected call of Environment. func (mr *MockInterfaceMockRecorder) Environment() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Environment", reflect.TypeOf((*MockInterface)(nil).Environment)) } -// FPAuthorizer mocks base method +// FPAuthorizer mocks base method. func (m *MockInterface) FPAuthorizer(arg0, arg1 string) (refreshable.Authorizer, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FPAuthorizer", arg0, arg1) @@ -373,13 +373,13 @@ func (m *MockInterface) FPAuthorizer(arg0, arg1 string) (refreshable.Authorizer, return ret0, ret1 } -// FPAuthorizer indicates an expected call of FPAuthorizer +// FPAuthorizer indicates an expected call of FPAuthorizer. func (mr *MockInterfaceMockRecorder) FPAuthorizer(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FPAuthorizer", reflect.TypeOf((*MockInterface)(nil).FPAuthorizer), arg0, arg1) } -// FeatureIsSet mocks base method +// FeatureIsSet mocks base method. func (m *MockInterface) FeatureIsSet(arg0 env.Feature) bool { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FeatureIsSet", arg0) @@ -387,13 +387,13 @@ func (m *MockInterface) FeatureIsSet(arg0 env.Feature) bool { return ret0 } -// FeatureIsSet indicates an expected call of FeatureIsSet +// FeatureIsSet indicates an expected call of FeatureIsSet. func (mr *MockInterfaceMockRecorder) FeatureIsSet(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FeatureIsSet", reflect.TypeOf((*MockInterface)(nil).FeatureIsSet), arg0) } -// Hostname mocks base method +// Hostname mocks base method. func (m *MockInterface) Hostname() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Hostname") @@ -401,13 +401,13 @@ func (m *MockInterface) Hostname() string { return ret0 } -// Hostname indicates an expected call of Hostname +// Hostname indicates an expected call of Hostname. func (mr *MockInterfaceMockRecorder) Hostname() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Hostname", reflect.TypeOf((*MockInterface)(nil).Hostname)) } -// InitializeAuthorizers mocks base method +// InitializeAuthorizers mocks base method. func (m *MockInterface) InitializeAuthorizers() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "InitializeAuthorizers") @@ -415,13 +415,13 @@ func (m *MockInterface) InitializeAuthorizers() error { return ret0 } -// InitializeAuthorizers indicates an expected call of InitializeAuthorizers +// InitializeAuthorizers indicates an expected call of InitializeAuthorizers. 
func (mr *MockInterfaceMockRecorder) InitializeAuthorizers() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitializeAuthorizers", reflect.TypeOf((*MockInterface)(nil).InitializeAuthorizers)) } -// IsDevelopmentMode mocks base method +// IsDevelopmentMode mocks base method. func (m *MockInterface) IsDevelopmentMode() bool { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "IsDevelopmentMode") @@ -429,13 +429,13 @@ func (m *MockInterface) IsDevelopmentMode() bool { return ret0 } -// IsDevelopmentMode indicates an expected call of IsDevelopmentMode +// IsDevelopmentMode indicates an expected call of IsDevelopmentMode. func (mr *MockInterfaceMockRecorder) IsDevelopmentMode() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsDevelopmentMode", reflect.TypeOf((*MockInterface)(nil).IsDevelopmentMode)) } -// Listen mocks base method +// Listen mocks base method. func (m *MockInterface) Listen() (net.Listener, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Listen") @@ -444,13 +444,13 @@ func (m *MockInterface) Listen() (net.Listener, error) { return ret0, ret1 } -// Listen indicates an expected call of Listen +// Listen indicates an expected call of Listen. func (mr *MockInterfaceMockRecorder) Listen() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Listen", reflect.TypeOf((*MockInterface)(nil).Listen)) } -// Location mocks base method +// Location mocks base method. func (m *MockInterface) Location() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Location") @@ -458,13 +458,13 @@ func (m *MockInterface) Location() string { return ret0 } -// Location indicates an expected call of Location +// Location indicates an expected call of Location. func (mr *MockInterfaceMockRecorder) Location() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Location", reflect.TypeOf((*MockInterface)(nil).Location)) } -// NewRPAuthorizer mocks base method +// NewRPAuthorizer mocks base method. func (m *MockInterface) NewRPAuthorizer(arg0 string) (autorest.Authorizer, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewRPAuthorizer", arg0) @@ -473,13 +473,13 @@ func (m *MockInterface) NewRPAuthorizer(arg0 string) (autorest.Authorizer, error return ret0, ret1 } -// NewRPAuthorizer indicates an expected call of NewRPAuthorizer +// NewRPAuthorizer indicates an expected call of NewRPAuthorizer. func (mr *MockInterfaceMockRecorder) NewRPAuthorizer(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewRPAuthorizer", reflect.TypeOf((*MockInterface)(nil).NewRPAuthorizer), arg0) } -// ResourceGroup mocks base method +// ResourceGroup mocks base method. func (m *MockInterface) ResourceGroup() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ResourceGroup") @@ -487,13 +487,13 @@ func (m *MockInterface) ResourceGroup() string { return ret0 } -// ResourceGroup indicates an expected call of ResourceGroup +// ResourceGroup indicates an expected call of ResourceGroup. func (mr *MockInterfaceMockRecorder) ResourceGroup() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResourceGroup", reflect.TypeOf((*MockInterface)(nil).ResourceGroup)) } -// ServiceKeyvault mocks base method +// ServiceKeyvault mocks base method. 
func (m *MockInterface) ServiceKeyvault() keyvault.Manager { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ServiceKeyvault") @@ -501,13 +501,13 @@ func (m *MockInterface) ServiceKeyvault() keyvault.Manager { return ret0 } -// ServiceKeyvault indicates an expected call of ServiceKeyvault +// ServiceKeyvault indicates an expected call of ServiceKeyvault. func (mr *MockInterfaceMockRecorder) ServiceKeyvault() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ServiceKeyvault", reflect.TypeOf((*MockInterface)(nil).ServiceKeyvault)) } -// SubscriptionID mocks base method +// SubscriptionID mocks base method. func (m *MockInterface) SubscriptionID() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SubscriptionID") @@ -515,13 +515,13 @@ func (m *MockInterface) SubscriptionID() string { return ret0 } -// SubscriptionID indicates an expected call of SubscriptionID +// SubscriptionID indicates an expected call of SubscriptionID. func (mr *MockInterfaceMockRecorder) SubscriptionID() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscriptionID", reflect.TypeOf((*MockInterface)(nil).SubscriptionID)) } -// TenantID mocks base method +// TenantID mocks base method. func (m *MockInterface) TenantID() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "TenantID") @@ -529,13 +529,13 @@ func (m *MockInterface) TenantID() string { return ret0 } -// TenantID indicates an expected call of TenantID +// TenantID indicates an expected call of TenantID. func (mr *MockInterfaceMockRecorder) TenantID() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantID", reflect.TypeOf((*MockInterface)(nil).TenantID)) } -// Zones mocks base method +// Zones mocks base method. func (m *MockInterface) Zones(arg0 string) ([]string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Zones", arg0) @@ -544,7 +544,7 @@ func (m *MockInterface) Zones(arg0 string) ([]string, error) { return ret0, ret1 } -// Zones indicates an expected call of Zones +// Zones indicates an expected call of Zones. func (mr *MockInterfaceMockRecorder) Zones(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Zones", reflect.TypeOf((*MockInterface)(nil).Zones), arg0) diff --git a/pkg/util/mocks/graph/graph.go b/pkg/util/mocks/graph/graph.go index 051a7476230..1c528dbb716 100644 --- a/pkg/util/mocks/graph/graph.go +++ b/pkg/util/mocks/graph/graph.go @@ -13,30 +13,30 @@ import ( graph "github.com/Azure/ARO-RP/pkg/cluster/graph" ) -// MockManager is a mock of Manager interface +// MockManager is a mock of Manager interface. type MockManager struct { ctrl *gomock.Controller recorder *MockManagerMockRecorder } -// MockManagerMockRecorder is the mock recorder for MockManager +// MockManagerMockRecorder is the mock recorder for MockManager. type MockManagerMockRecorder struct { mock *MockManager } -// NewMockManager creates a new mock instance +// NewMockManager creates a new mock instance. func NewMockManager(ctrl *gomock.Controller) *MockManager { mock := &MockManager{ctrl: ctrl} mock.recorder = &MockManagerMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockManager) EXPECT() *MockManagerMockRecorder { return m.recorder } -// Exists mocks base method +// Exists mocks base method. 
func (m *MockManager) Exists(arg0 context.Context, arg1, arg2 string) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Exists", arg0, arg1, arg2) @@ -45,13 +45,13 @@ func (m *MockManager) Exists(arg0 context.Context, arg1, arg2 string) (bool, err return ret0, ret1 } -// Exists indicates an expected call of Exists +// Exists indicates an expected call of Exists. func (mr *MockManagerMockRecorder) Exists(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exists", reflect.TypeOf((*MockManager)(nil).Exists), arg0, arg1, arg2) } -// LoadPersisted mocks base method +// LoadPersisted mocks base method. func (m *MockManager) LoadPersisted(arg0 context.Context, arg1, arg2 string) (graph.PersistedGraph, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "LoadPersisted", arg0, arg1, arg2) @@ -60,13 +60,13 @@ func (m *MockManager) LoadPersisted(arg0 context.Context, arg1, arg2 string) (gr return ret0, ret1 } -// LoadPersisted indicates an expected call of LoadPersisted +// LoadPersisted indicates an expected call of LoadPersisted. func (mr *MockManagerMockRecorder) LoadPersisted(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadPersisted", reflect.TypeOf((*MockManager)(nil).LoadPersisted), arg0, arg1, arg2) } -// Save mocks base method +// Save mocks base method. func (m *MockManager) Save(arg0 context.Context, arg1, arg2 string, arg3 graph.Graph) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Save", arg0, arg1, arg2, arg3) @@ -74,7 +74,7 @@ func (m *MockManager) Save(arg0 context.Context, arg1, arg2 string, arg3 graph.G return ret0 } -// Save indicates an expected call of Save +// Save indicates an expected call of Save. func (mr *MockManagerMockRecorder) Save(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Save", reflect.TypeOf((*MockManager)(nil).Save), arg0, arg1, arg2, arg3) diff --git a/pkg/util/mocks/instancemetadata/instancemetadata.go b/pkg/util/mocks/instancemetadata/instancemetadata.go index b5bad8cb634..cdf39427506 100644 --- a/pkg/util/mocks/instancemetadata/instancemetadata.go +++ b/pkg/util/mocks/instancemetadata/instancemetadata.go @@ -12,30 +12,30 @@ import ( gomock "github.com/golang/mock/gomock" ) -// MockServicePrincipalToken is a mock of ServicePrincipalToken interface +// MockServicePrincipalToken is a mock of ServicePrincipalToken interface. type MockServicePrincipalToken struct { ctrl *gomock.Controller recorder *MockServicePrincipalTokenMockRecorder } -// MockServicePrincipalTokenMockRecorder is the mock recorder for MockServicePrincipalToken +// MockServicePrincipalTokenMockRecorder is the mock recorder for MockServicePrincipalToken. type MockServicePrincipalTokenMockRecorder struct { mock *MockServicePrincipalToken } -// NewMockServicePrincipalToken creates a new mock instance +// NewMockServicePrincipalToken creates a new mock instance. func NewMockServicePrincipalToken(ctrl *gomock.Controller) *MockServicePrincipalToken { mock := &MockServicePrincipalToken{ctrl: ctrl} mock.recorder = &MockServicePrincipalTokenMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. 
func (m *MockServicePrincipalToken) EXPECT() *MockServicePrincipalTokenMockRecorder { return m.recorder } -// OAuthToken mocks base method +// OAuthToken mocks base method. func (m *MockServicePrincipalToken) OAuthToken() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "OAuthToken") @@ -43,13 +43,13 @@ func (m *MockServicePrincipalToken) OAuthToken() string { return ret0 } -// OAuthToken indicates an expected call of OAuthToken +// OAuthToken indicates an expected call of OAuthToken. func (mr *MockServicePrincipalTokenMockRecorder) OAuthToken() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OAuthToken", reflect.TypeOf((*MockServicePrincipalToken)(nil).OAuthToken)) } -// RefreshWithContext mocks base method +// RefreshWithContext mocks base method. func (m *MockServicePrincipalToken) RefreshWithContext(arg0 context.Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RefreshWithContext", arg0) @@ -57,36 +57,36 @@ func (m *MockServicePrincipalToken) RefreshWithContext(arg0 context.Context) err return ret0 } -// RefreshWithContext indicates an expected call of RefreshWithContext +// RefreshWithContext indicates an expected call of RefreshWithContext. func (mr *MockServicePrincipalTokenMockRecorder) RefreshWithContext(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RefreshWithContext", reflect.TypeOf((*MockServicePrincipalToken)(nil).RefreshWithContext), arg0) } -// MockInstanceMetadata is a mock of InstanceMetadata interface +// MockInstanceMetadata is a mock of InstanceMetadata interface. type MockInstanceMetadata struct { ctrl *gomock.Controller recorder *MockInstanceMetadataMockRecorder } -// MockInstanceMetadataMockRecorder is the mock recorder for MockInstanceMetadata +// MockInstanceMetadataMockRecorder is the mock recorder for MockInstanceMetadata. type MockInstanceMetadataMockRecorder struct { mock *MockInstanceMetadata } -// NewMockInstanceMetadata creates a new mock instance +// NewMockInstanceMetadata creates a new mock instance. func NewMockInstanceMetadata(ctrl *gomock.Controller) *MockInstanceMetadata { mock := &MockInstanceMetadata{ctrl: ctrl} mock.recorder = &MockInstanceMetadataMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockInstanceMetadata) EXPECT() *MockInstanceMetadataMockRecorder { return m.recorder } -// Environment mocks base method +// Environment mocks base method. func (m *MockInstanceMetadata) Environment() *azure.Environment { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Environment") @@ -94,13 +94,13 @@ func (m *MockInstanceMetadata) Environment() *azure.Environment { return ret0 } -// Environment indicates an expected call of Environment +// Environment indicates an expected call of Environment. func (mr *MockInstanceMetadataMockRecorder) Environment() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Environment", reflect.TypeOf((*MockInstanceMetadata)(nil).Environment)) } -// Hostname mocks base method +// Hostname mocks base method. func (m *MockInstanceMetadata) Hostname() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Hostname") @@ -108,13 +108,13 @@ func (m *MockInstanceMetadata) Hostname() string { return ret0 } -// Hostname indicates an expected call of Hostname +// Hostname indicates an expected call of Hostname. 
func (mr *MockInstanceMetadataMockRecorder) Hostname() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Hostname", reflect.TypeOf((*MockInstanceMetadata)(nil).Hostname)) } -// Location mocks base method +// Location mocks base method. func (m *MockInstanceMetadata) Location() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Location") @@ -122,13 +122,13 @@ func (m *MockInstanceMetadata) Location() string { return ret0 } -// Location indicates an expected call of Location +// Location indicates an expected call of Location. func (mr *MockInstanceMetadataMockRecorder) Location() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Location", reflect.TypeOf((*MockInstanceMetadata)(nil).Location)) } -// ResourceGroup mocks base method +// ResourceGroup mocks base method. func (m *MockInstanceMetadata) ResourceGroup() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ResourceGroup") @@ -136,13 +136,13 @@ func (m *MockInstanceMetadata) ResourceGroup() string { return ret0 } -// ResourceGroup indicates an expected call of ResourceGroup +// ResourceGroup indicates an expected call of ResourceGroup. func (mr *MockInstanceMetadataMockRecorder) ResourceGroup() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResourceGroup", reflect.TypeOf((*MockInstanceMetadata)(nil).ResourceGroup)) } -// SubscriptionID mocks base method +// SubscriptionID mocks base method. func (m *MockInstanceMetadata) SubscriptionID() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SubscriptionID") @@ -150,13 +150,13 @@ func (m *MockInstanceMetadata) SubscriptionID() string { return ret0 } -// SubscriptionID indicates an expected call of SubscriptionID +// SubscriptionID indicates an expected call of SubscriptionID. func (mr *MockInstanceMetadataMockRecorder) SubscriptionID() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscriptionID", reflect.TypeOf((*MockInstanceMetadata)(nil).SubscriptionID)) } -// TenantID mocks base method +// TenantID mocks base method. func (m *MockInstanceMetadata) TenantID() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "TenantID") @@ -164,7 +164,7 @@ func (m *MockInstanceMetadata) TenantID() string { return ret0 } -// TenantID indicates an expected call of TenantID +// TenantID indicates an expected call of TenantID. func (mr *MockInstanceMetadataMockRecorder) TenantID() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantID", reflect.TypeOf((*MockInstanceMetadata)(nil).TenantID)) diff --git a/pkg/util/mocks/keyvault/keyvault.go b/pkg/util/mocks/keyvault/keyvault.go index 3f68494903d..b66675369bd 100644 --- a/pkg/util/mocks/keyvault/keyvault.go +++ b/pkg/util/mocks/keyvault/keyvault.go @@ -16,30 +16,30 @@ import ( keyvault "github.com/Azure/ARO-RP/pkg/util/keyvault" ) -// MockManager is a mock of Manager interface +// MockManager is a mock of Manager interface. type MockManager struct { ctrl *gomock.Controller recorder *MockManagerMockRecorder } -// MockManagerMockRecorder is the mock recorder for MockManager +// MockManagerMockRecorder is the mock recorder for MockManager. type MockManagerMockRecorder struct { mock *MockManager } -// NewMockManager creates a new mock instance +// NewMockManager creates a new mock instance. 
func NewMockManager(ctrl *gomock.Controller) *MockManager { mock := &MockManager{ctrl: ctrl} mock.recorder = &MockManagerMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockManager) EXPECT() *MockManagerMockRecorder { return m.recorder } -// CreateSignedCertificate mocks base method +// CreateSignedCertificate mocks base method. func (m *MockManager) CreateSignedCertificate(arg0 context.Context, arg1 keyvault.Issuer, arg2, arg3 string, arg4 keyvault.Eku) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateSignedCertificate", arg0, arg1, arg2, arg3, arg4) @@ -47,13 +47,13 @@ func (m *MockManager) CreateSignedCertificate(arg0 context.Context, arg1 keyvaul return ret0 } -// CreateSignedCertificate indicates an expected call of CreateSignedCertificate +// CreateSignedCertificate indicates an expected call of CreateSignedCertificate. func (mr *MockManagerMockRecorder) CreateSignedCertificate(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSignedCertificate", reflect.TypeOf((*MockManager)(nil).CreateSignedCertificate), arg0, arg1, arg2, arg3, arg4) } -// EnsureCertificateDeleted mocks base method +// EnsureCertificateDeleted mocks base method. func (m *MockManager) EnsureCertificateDeleted(arg0 context.Context, arg1 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "EnsureCertificateDeleted", arg0, arg1) @@ -61,13 +61,13 @@ func (m *MockManager) EnsureCertificateDeleted(arg0 context.Context, arg1 string return ret0 } -// EnsureCertificateDeleted indicates an expected call of EnsureCertificateDeleted +// EnsureCertificateDeleted indicates an expected call of EnsureCertificateDeleted. func (mr *MockManagerMockRecorder) EnsureCertificateDeleted(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureCertificateDeleted", reflect.TypeOf((*MockManager)(nil).EnsureCertificateDeleted), arg0, arg1) } -// GetBase64Secret mocks base method +// GetBase64Secret mocks base method. func (m *MockManager) GetBase64Secret(arg0 context.Context, arg1 string) ([]byte, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetBase64Secret", arg0, arg1) @@ -76,13 +76,13 @@ func (m *MockManager) GetBase64Secret(arg0 context.Context, arg1 string) ([]byte return ret0, ret1 } -// GetBase64Secret indicates an expected call of GetBase64Secret +// GetBase64Secret indicates an expected call of GetBase64Secret. func (mr *MockManagerMockRecorder) GetBase64Secret(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBase64Secret", reflect.TypeOf((*MockManager)(nil).GetBase64Secret), arg0, arg1) } -// GetCertificateSecret mocks base method +// GetCertificateSecret mocks base method. func (m *MockManager) GetCertificateSecret(arg0 context.Context, arg1 string) (*rsa.PrivateKey, []*x509.Certificate, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetCertificateSecret", arg0, arg1) @@ -92,13 +92,13 @@ func (m *MockManager) GetCertificateSecret(arg0 context.Context, arg1 string) (* return ret0, ret1, ret2 } -// GetCertificateSecret indicates an expected call of GetCertificateSecret +// GetCertificateSecret indicates an expected call of GetCertificateSecret. 
func (mr *MockManagerMockRecorder) GetCertificateSecret(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCertificateSecret", reflect.TypeOf((*MockManager)(nil).GetCertificateSecret), arg0, arg1) } -// GetSecret mocks base method +// GetSecret mocks base method. func (m *MockManager) GetSecret(arg0 context.Context, arg1 string) (keyvault0.SecretBundle, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetSecret", arg0, arg1) @@ -107,13 +107,13 @@ func (m *MockManager) GetSecret(arg0 context.Context, arg1 string) (keyvault0.Se return ret0, ret1 } -// GetSecret indicates an expected call of GetSecret +// GetSecret indicates an expected call of GetSecret. func (mr *MockManagerMockRecorder) GetSecret(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSecret", reflect.TypeOf((*MockManager)(nil).GetSecret), arg0, arg1) } -// GetSecrets mocks base method +// GetSecrets mocks base method. func (m *MockManager) GetSecrets(arg0 context.Context) ([]keyvault0.SecretItem, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetSecrets", arg0) @@ -122,13 +122,13 @@ func (m *MockManager) GetSecrets(arg0 context.Context) ([]keyvault0.SecretItem, return ret0, ret1 } -// GetSecrets indicates an expected call of GetSecrets +// GetSecrets indicates an expected call of GetSecrets. func (mr *MockManagerMockRecorder) GetSecrets(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSecrets", reflect.TypeOf((*MockManager)(nil).GetSecrets), arg0) } -// SetSecret mocks base method +// SetSecret mocks base method. func (m *MockManager) SetSecret(arg0 context.Context, arg1 string, arg2 keyvault0.SecretSetParameters) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetSecret", arg0, arg1, arg2) @@ -136,13 +136,13 @@ func (m *MockManager) SetSecret(arg0 context.Context, arg1 string, arg2 keyvault return ret0 } -// SetSecret indicates an expected call of SetSecret +// SetSecret indicates an expected call of SetSecret. func (mr *MockManagerMockRecorder) SetSecret(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSecret", reflect.TypeOf((*MockManager)(nil).SetSecret), arg0, arg1, arg2) } -// WaitForCertificateOperation mocks base method +// WaitForCertificateOperation mocks base method. func (m *MockManager) WaitForCertificateOperation(arg0 context.Context, arg1 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WaitForCertificateOperation", arg0, arg1) @@ -150,7 +150,7 @@ func (m *MockManager) WaitForCertificateOperation(arg0 context.Context, arg1 str return ret0 } -// WaitForCertificateOperation indicates an expected call of WaitForCertificateOperation +// WaitForCertificateOperation indicates an expected call of WaitForCertificateOperation. 
func (mr *MockManagerMockRecorder) WaitForCertificateOperation(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForCertificateOperation", reflect.TypeOf((*MockManager)(nil).WaitForCertificateOperation), arg0, arg1) diff --git a/pkg/util/mocks/metrics/metrics.go b/pkg/util/mocks/metrics/metrics.go index c4f7d5a5e55..1c2c3c036d8 100644 --- a/pkg/util/mocks/metrics/metrics.go +++ b/pkg/util/mocks/metrics/metrics.go @@ -10,48 +10,48 @@ import ( gomock "github.com/golang/mock/gomock" ) -// MockInterface is a mock of Interface interface +// MockInterface is a mock of Interface interface. type MockInterface struct { ctrl *gomock.Controller recorder *MockInterfaceMockRecorder } -// MockInterfaceMockRecorder is the mock recorder for MockInterface +// MockInterfaceMockRecorder is the mock recorder for MockInterface. type MockInterfaceMockRecorder struct { mock *MockInterface } -// NewMockInterface creates a new mock instance +// NewMockInterface creates a new mock instance. func NewMockInterface(ctrl *gomock.Controller) *MockInterface { mock := &MockInterface{ctrl: ctrl} mock.recorder = &MockInterfaceMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder { return m.recorder } -// EmitFloat mocks base method +// EmitFloat mocks base method. func (m *MockInterface) EmitFloat(arg0 string, arg1 float64, arg2 map[string]string) { m.ctrl.T.Helper() m.ctrl.Call(m, "EmitFloat", arg0, arg1, arg2) } -// EmitFloat indicates an expected call of EmitFloat +// EmitFloat indicates an expected call of EmitFloat. func (mr *MockInterfaceMockRecorder) EmitFloat(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EmitFloat", reflect.TypeOf((*MockInterface)(nil).EmitFloat), arg0, arg1, arg2) } -// EmitGauge mocks base method +// EmitGauge mocks base method. func (m *MockInterface) EmitGauge(arg0 string, arg1 int64, arg2 map[string]string) { m.ctrl.T.Helper() m.ctrl.Call(m, "EmitGauge", arg0, arg1, arg2) } -// EmitGauge indicates an expected call of EmitGauge +// EmitGauge indicates an expected call of EmitGauge. func (mr *MockInterfaceMockRecorder) EmitGauge(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EmitGauge", reflect.TypeOf((*MockInterface)(nil).EmitGauge), arg0, arg1, arg2) diff --git a/pkg/util/mocks/operator/controllers/workaround/workaround.go b/pkg/util/mocks/operator/controllers/workaround/workaround.go index 2fa4627f127..04f21b23677 100644 --- a/pkg/util/mocks/operator/controllers/workaround/workaround.go +++ b/pkg/util/mocks/operator/controllers/workaround/workaround.go @@ -13,30 +13,30 @@ import ( version "github.com/Azure/ARO-RP/pkg/util/version" ) -// MockWorkaround is a mock of Workaround interface +// MockWorkaround is a mock of Workaround interface. type MockWorkaround struct { ctrl *gomock.Controller recorder *MockWorkaroundMockRecorder } -// MockWorkaroundMockRecorder is the mock recorder for MockWorkaround +// MockWorkaroundMockRecorder is the mock recorder for MockWorkaround. type MockWorkaroundMockRecorder struct { mock *MockWorkaround } -// NewMockWorkaround creates a new mock instance +// NewMockWorkaround creates a new mock instance. 
func NewMockWorkaround(ctrl *gomock.Controller) *MockWorkaround { mock := &MockWorkaround{ctrl: ctrl} mock.recorder = &MockWorkaroundMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockWorkaround) EXPECT() *MockWorkaroundMockRecorder { return m.recorder } -// Ensure mocks base method +// Ensure mocks base method. func (m *MockWorkaround) Ensure(arg0 context.Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Ensure", arg0) @@ -44,13 +44,13 @@ func (m *MockWorkaround) Ensure(arg0 context.Context) error { return ret0 } -// Ensure indicates an expected call of Ensure +// Ensure indicates an expected call of Ensure. func (mr *MockWorkaroundMockRecorder) Ensure(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ensure", reflect.TypeOf((*MockWorkaround)(nil).Ensure), arg0) } -// IsRequired mocks base method +// IsRequired mocks base method. func (m *MockWorkaround) IsRequired(arg0 *version.Version) bool { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "IsRequired", arg0) @@ -58,13 +58,13 @@ func (m *MockWorkaround) IsRequired(arg0 *version.Version) bool { return ret0 } -// IsRequired indicates an expected call of IsRequired +// IsRequired indicates an expected call of IsRequired. func (mr *MockWorkaroundMockRecorder) IsRequired(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsRequired", reflect.TypeOf((*MockWorkaround)(nil).IsRequired), arg0) } -// Name mocks base method +// Name mocks base method. func (m *MockWorkaround) Name() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Name") @@ -72,13 +72,13 @@ func (m *MockWorkaround) Name() string { return ret0 } -// Name indicates an expected call of Name +// Name indicates an expected call of Name. func (mr *MockWorkaroundMockRecorder) Name() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Name", reflect.TypeOf((*MockWorkaround)(nil).Name)) } -// Remove mocks base method +// Remove mocks base method. func (m *MockWorkaround) Remove(arg0 context.Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Remove", arg0) @@ -86,7 +86,7 @@ func (m *MockWorkaround) Remove(arg0 context.Context) error { return ret0 } -// Remove indicates an expected call of Remove +// Remove indicates an expected call of Remove. func (mr *MockWorkaroundMockRecorder) Remove(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remove", reflect.TypeOf((*MockWorkaround)(nil).Remove), arg0) diff --git a/pkg/util/mocks/proxy/proxy.go b/pkg/util/mocks/proxy/proxy.go index 9d5ff202beb..3a0de4b6314 100644 --- a/pkg/util/mocks/proxy/proxy.go +++ b/pkg/util/mocks/proxy/proxy.go @@ -12,30 +12,30 @@ import ( gomock "github.com/golang/mock/gomock" ) -// MockDialer is a mock of Dialer interface +// MockDialer is a mock of Dialer interface. type MockDialer struct { ctrl *gomock.Controller recorder *MockDialerMockRecorder } -// MockDialerMockRecorder is the mock recorder for MockDialer +// MockDialerMockRecorder is the mock recorder for MockDialer. type MockDialerMockRecorder struct { mock *MockDialer } -// NewMockDialer creates a new mock instance +// NewMockDialer creates a new mock instance. 
func NewMockDialer(ctrl *gomock.Controller) *MockDialer { mock := &MockDialer{ctrl: ctrl} mock.recorder = &MockDialerMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockDialer) EXPECT() *MockDialerMockRecorder { return m.recorder } -// DialContext mocks base method +// DialContext mocks base method. func (m *MockDialer) DialContext(arg0 context.Context, arg1, arg2 string) (net.Conn, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DialContext", arg0, arg1, arg2) @@ -44,7 +44,7 @@ func (m *MockDialer) DialContext(arg0 context.Context, arg1, arg2 string) (net.C return ret0, ret1 } -// DialContext indicates an expected call of DialContext +// DialContext indicates an expected call of DialContext. func (mr *MockDialerMockRecorder) DialContext(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DialContext", reflect.TypeOf((*MockDialer)(nil).DialContext), arg0, arg1, arg2) diff --git a/pkg/util/mocks/refreshable/refreshable.go b/pkg/util/mocks/refreshable/refreshable.go index ad0ed768ab1..fdbc3b06cb7 100644 --- a/pkg/util/mocks/refreshable/refreshable.go +++ b/pkg/util/mocks/refreshable/refreshable.go @@ -13,30 +13,30 @@ import ( logrus "github.com/sirupsen/logrus" ) -// MockAuthorizer is a mock of Authorizer interface +// MockAuthorizer is a mock of Authorizer interface. type MockAuthorizer struct { ctrl *gomock.Controller recorder *MockAuthorizerMockRecorder } -// MockAuthorizerMockRecorder is the mock recorder for MockAuthorizer +// MockAuthorizerMockRecorder is the mock recorder for MockAuthorizer. type MockAuthorizerMockRecorder struct { mock *MockAuthorizer } -// NewMockAuthorizer creates a new mock instance +// NewMockAuthorizer creates a new mock instance. func NewMockAuthorizer(ctrl *gomock.Controller) *MockAuthorizer { mock := &MockAuthorizer{ctrl: ctrl} mock.recorder = &MockAuthorizerMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockAuthorizer) EXPECT() *MockAuthorizerMockRecorder { return m.recorder } -// OAuthToken mocks base method +// OAuthToken mocks base method. func (m *MockAuthorizer) OAuthToken() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "OAuthToken") @@ -44,13 +44,13 @@ func (m *MockAuthorizer) OAuthToken() string { return ret0 } -// OAuthToken indicates an expected call of OAuthToken +// OAuthToken indicates an expected call of OAuthToken. func (mr *MockAuthorizerMockRecorder) OAuthToken() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OAuthToken", reflect.TypeOf((*MockAuthorizer)(nil).OAuthToken)) } -// RefreshWithContext mocks base method +// RefreshWithContext mocks base method. func (m *MockAuthorizer) RefreshWithContext(arg0 context.Context, arg1 *logrus.Entry) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RefreshWithContext", arg0, arg1) @@ -59,13 +59,13 @@ func (m *MockAuthorizer) RefreshWithContext(arg0 context.Context, arg1 *logrus.E return ret0, ret1 } -// RefreshWithContext indicates an expected call of RefreshWithContext +// RefreshWithContext indicates an expected call of RefreshWithContext. 
func (mr *MockAuthorizerMockRecorder) RefreshWithContext(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RefreshWithContext", reflect.TypeOf((*MockAuthorizer)(nil).RefreshWithContext), arg0, arg1) } -// WithAuthorization mocks base method +// WithAuthorization mocks base method. func (m *MockAuthorizer) WithAuthorization() autorest.PrepareDecorator { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WithAuthorization") @@ -73,7 +73,7 @@ func (m *MockAuthorizer) WithAuthorization() autorest.PrepareDecorator { return ret0 } -// WithAuthorization indicates an expected call of WithAuthorization +// WithAuthorization indicates an expected call of WithAuthorization. func (mr *MockAuthorizerMockRecorder) WithAuthorization() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithAuthorization", reflect.TypeOf((*MockAuthorizer)(nil).WithAuthorization)) diff --git a/pkg/util/mocks/storage/storage.go b/pkg/util/mocks/storage/storage.go index 0d23ec071db..244bac29778 100644 --- a/pkg/util/mocks/storage/storage.go +++ b/pkg/util/mocks/storage/storage.go @@ -13,30 +13,30 @@ import ( gomock "github.com/golang/mock/gomock" ) -// MockManager is a mock of Manager interface +// MockManager is a mock of Manager interface. type MockManager struct { ctrl *gomock.Controller recorder *MockManagerMockRecorder } -// MockManagerMockRecorder is the mock recorder for MockManager +// MockManagerMockRecorder is the mock recorder for MockManager. type MockManagerMockRecorder struct { mock *MockManager } -// NewMockManager creates a new mock instance +// NewMockManager creates a new mock instance. func NewMockManager(ctrl *gomock.Controller) *MockManager { mock := &MockManager{ctrl: ctrl} mock.recorder = &MockManagerMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockManager) EXPECT() *MockManagerMockRecorder { return m.recorder } -// BlobService mocks base method +// BlobService mocks base method. func (m *MockManager) BlobService(arg0 context.Context, arg1, arg2 string, arg3 storage.Permissions, arg4 storage.SignedResourceTypes) (*storage0.BlobStorageClient, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "BlobService", arg0, arg1, arg2, arg3, arg4) @@ -45,7 +45,7 @@ func (m *MockManager) BlobService(arg0 context.Context, arg1, arg2 string, arg3 return ret0, ret1 } -// BlobService indicates an expected call of BlobService +// BlobService indicates an expected call of BlobService. func (mr *MockManagerMockRecorder) BlobService(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlobService", reflect.TypeOf((*MockManager)(nil).BlobService), arg0, arg1, arg2, arg3, arg4) diff --git a/pkg/util/mocks/subnet/subnet.go b/pkg/util/mocks/subnet/subnet.go index 1fd16eb1761..fc64b4c40fc 100644 --- a/pkg/util/mocks/subnet/subnet.go +++ b/pkg/util/mocks/subnet/subnet.go @@ -12,30 +12,30 @@ import ( gomock "github.com/golang/mock/gomock" ) -// MockManager is a mock of Manager interface +// MockManager is a mock of Manager interface. type MockManager struct { ctrl *gomock.Controller recorder *MockManagerMockRecorder } -// MockManagerMockRecorder is the mock recorder for MockManager +// MockManagerMockRecorder is the mock recorder for MockManager. 
type MockManagerMockRecorder struct { mock *MockManager } -// NewMockManager creates a new mock instance +// NewMockManager creates a new mock instance. func NewMockManager(ctrl *gomock.Controller) *MockManager { mock := &MockManager{ctrl: ctrl} mock.recorder = &MockManagerMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockManager) EXPECT() *MockManagerMockRecorder { return m.recorder } -// CreateOrUpdate mocks base method +// CreateOrUpdate mocks base method. func (m *MockManager) CreateOrUpdate(arg0 context.Context, arg1 string, arg2 *network.Subnet) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateOrUpdate", arg0, arg1, arg2) @@ -43,13 +43,13 @@ func (m *MockManager) CreateOrUpdate(arg0 context.Context, arg1 string, arg2 *ne return ret0 } -// CreateOrUpdate indicates an expected call of CreateOrUpdate +// CreateOrUpdate indicates an expected call of CreateOrUpdate. func (mr *MockManagerMockRecorder) CreateOrUpdate(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockManager)(nil).CreateOrUpdate), arg0, arg1, arg2) } -// Get mocks base method +// Get mocks base method. func (m *MockManager) Get(arg0 context.Context, arg1 string) (*network.Subnet, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", arg0, arg1) @@ -58,13 +58,13 @@ func (m *MockManager) Get(arg0 context.Context, arg1 string) (*network.Subnet, e return ret0, ret1 } -// Get indicates an expected call of Get +// Get indicates an expected call of Get. func (mr *MockManagerMockRecorder) Get(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockManager)(nil).Get), arg0, arg1) } -// GetHighestFreeIP mocks base method +// GetHighestFreeIP mocks base method. func (m *MockManager) GetHighestFreeIP(arg0 context.Context, arg1 string) (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetHighestFreeIP", arg0, arg1) @@ -73,7 +73,7 @@ func (m *MockManager) GetHighestFreeIP(arg0 context.Context, arg1 string) (strin return ret0, ret1 } -// GetHighestFreeIP indicates an expected call of GetHighestFreeIP +// GetHighestFreeIP indicates an expected call of GetHighestFreeIP. func (mr *MockManagerMockRecorder) GetHighestFreeIP(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHighestFreeIP", reflect.TypeOf((*MockManager)(nil).GetHighestFreeIP), arg0, arg1) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE deleted file mode 100644 index 047555ec7e5..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2020 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
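Aside: the mock changes earlier in this diff are mechanical, the regeneration only adds trailing periods to the generated doc comments, so tests consume the mocks exactly as before. A minimal sketch of a test against the regenerated keyvault mock; the package name mock_keyvault and the secret name are assumptions for illustration, while NewMockManager, EXPECT and GetBase64Secret follow the generated code shown above.

```go
package keyvault_test

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"

	// Assumed import path/package name for the generated mock shown in this diff.
	mock_keyvault "github.com/Azure/ARO-RP/pkg/util/mocks/keyvault"
)

func TestGetBase64Secret(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	kv := mock_keyvault.NewMockManager(ctrl)
	// Expect exactly one call and return a canned value.
	kv.EXPECT().GetBase64Secret(gomock.Any(), "example-secret").Return([]byte("value"), nil)

	got, err := kv.GetBase64Secret(context.Background(), "example-secret")
	if err != nil || string(got) != "value" {
		t.Fatalf("unexpected result: %q, %v", got, err)
	}
}
```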
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE.txt new file mode 100644 index 00000000000..ccb63b16673 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE.txt @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2021 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/profiles/latest/dns/mgmt/dns/models.go b/vendor/github.com/Azure/azure-sdk-for-go/profiles/latest/dns/mgmt/dns/models.go index a54eb9fce7d..7aecf79430a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/profiles/latest/dns/mgmt/dns/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/profiles/latest/dns/mgmt/dns/models.go @@ -1,18 +1,7 @@ // +build go1.9 -// Copyright 2021 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // This code was auto-generated by: // github.com/Azure/azure-sdk-for-go/tools/profileBuilder diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/CHANGELOG.md index b2c69b7ede8..38bd319c1ad 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/CHANGELOG.md @@ -1,5 +1,75 @@ Generated from https://github.com/Azure/azure-rest-api-specs/tree/3c764635e7d442b3e74caf593029fcd440b3ef82//specification/compute/resource-manager/readme.md tag: `package-2018-10-01` -Code generator @microsoft.azure/autorest.go@2.1.175 +Code generator @microsoft.azure/autorest.go@2.1.178 +### New Funcs + +1. *ContainerServicesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. 
*ContainerServicesDeleteFuture.UnmarshalJSON([]byte) error +1. *DisksCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *DisksDeleteFuture.UnmarshalJSON([]byte) error +1. *DisksGrantAccessFuture.UnmarshalJSON([]byte) error +1. *DisksRevokeAccessFuture.UnmarshalJSON([]byte) error +1. *DisksUpdateFuture.UnmarshalJSON([]byte) error +1. *GalleriesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *GalleriesDeleteFuture.UnmarshalJSON([]byte) error +1. *GalleryImageVersionsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *GalleryImageVersionsDeleteFuture.UnmarshalJSON([]byte) error +1. *GalleryImagesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *GalleryImagesDeleteFuture.UnmarshalJSON([]byte) error +1. *ImagesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ImagesDeleteFuture.UnmarshalJSON([]byte) error +1. *ImagesUpdateFuture.UnmarshalJSON([]byte) error +1. *LogAnalyticsExportRequestRateByIntervalFuture.UnmarshalJSON([]byte) error +1. *LogAnalyticsExportThrottledRequestsFuture.UnmarshalJSON([]byte) error +1. *SnapshotsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *SnapshotsDeleteFuture.UnmarshalJSON([]byte) error +1. *SnapshotsGrantAccessFuture.UnmarshalJSON([]byte) error +1. *SnapshotsRevokeAccessFuture.UnmarshalJSON([]byte) error +1. *SnapshotsUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineExtensionsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineExtensionsDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineExtensionsUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetExtensionsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetExtensionsDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetRollingUpgradesCancelFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMsDeallocateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMsDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMsPerformMaintenanceFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMsPowerOffFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMsRedeployFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMsReimageAllFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMsReimageFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMsRestartFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMsRunCommandFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMsStartFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMsUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetsDeallocateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetsDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetsDeleteInstancesFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetsPerformMaintenanceFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetsPowerOffFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetsRedeployFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetsReimageAllFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetsReimageFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetsRestartFuture.UnmarshalJSON([]byte) error +1. 
*VirtualMachineScaleSetsStartFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetsUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetsUpdateInstancesFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesCaptureFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesConvertToManagedDisksFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesDeallocateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesPerformMaintenanceFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesPowerOffFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesRedeployFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesReimageFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesRestartFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesRunCommandFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesStartFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesUpdateFuture.UnmarshalJSON([]byte) error diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/_meta.json new file mode 100644 index 00000000000..5f205a8afa9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/_meta.json @@ -0,0 +1,10 @@ +{ + "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", + "readme": "/_/azure-rest-api-specs/specification/compute/resource-manager/readme.md", + "tag": "package-2018-10-01", + "use": "@microsoft.azure/autorest.go@2.1.178", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.178 --go --verbose --go-sdk-folder=/_/azure-sdk-for-go --tag=package-2018-10-01 --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/compute/resource-manager/readme.md", + "additional_properties": { + "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION" + } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/availabilitysets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/availabilitysets.go index 2dc4fb8d762..8c5fed69319 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/availabilitysets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/availabilitysets.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. 
// Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/client.go index d9d0430190e..1812f27feb9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/client.go @@ -3,19 +3,8 @@ // Compute Client package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/containerservices.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/containerservices.go index 23e5e17e7de..d27f363dab6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/containerservices.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/containerservices.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
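The generated-code hunks that follow replace the long inline `future.Result` closures with `future.Result = future.result`, moving the polling and typed-result logic onto the future types themselves (the new `UnmarshalJSON` methods listed in the CHANGELOG above come from the same regeneration). Caller code is unaffected. A rough sketch of the usual long-running-operation pattern against the regenerated disks client, with hypothetical resource names; the calls shown (CreateOrUpdate returning a future, WaitForCompletionRef, Result) are the standard pattern for this SDK package rather than anything specific to this change.

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
)

// createDisk sketches the caller-side pattern; resource names are hypothetical.
func createDisk(ctx context.Context, client compute.DisksClient, disk compute.Disk) (compute.Disk, error) {
	future, err := client.CreateOrUpdate(ctx, "example-rg", "example-disk", disk)
	if err != nil {
		return compute.Disk{}, err
	}
	// Poll until the asynchronous operation completes.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return compute.Disk{}, err
	}
	// Result is now backed by the unexported result method on the future type.
	return future.Result(client)
}
```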
@@ -140,30 +129,7 @@ func (client ContainerServicesClient) CreateOrUpdateSender(req *http.Request) (f var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ContainerServicesClient) (cs ContainerService, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.ContainerServicesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.ContainerServicesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - cs.Response.Response, err = future.GetResult(sender) - if cs.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.ContainerServicesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && cs.Response.Response.StatusCode != http.StatusNoContent { - cs, err = client.CreateOrUpdateResponder(cs.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.ContainerServicesCreateOrUpdateFuture", "Result", cs.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -244,20 +210,7 @@ func (client ContainerServicesClient) DeleteSender(req *http.Request) (future Co var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ContainerServicesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.ContainerServicesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.ContainerServicesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/disks.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/disks.go index 7bff8a8090d..0b69e93dbd8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/disks.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/disks.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -131,30 +120,7 @@ func (client DisksClient) CreateOrUpdateSender(req *http.Request) (future DisksC var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DisksClient) (d Disk, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.DisksCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - d.Response.Response, err = future.GetResult(sender) - if d.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && d.Response.Response.StatusCode != http.StatusNoContent { - d, err = client.CreateOrUpdateResponder(d.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", d.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -234,20 +200,7 @@ func (client DisksClient) DeleteSender(req *http.Request) (future DisksDeleteFut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DisksClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.DisksDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -413,30 +366,7 @@ func (client DisksClient) GrantAccessSender(req *http.Request) (future DisksGran var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DisksClient) (au AccessURI, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.DisksGrantAccessFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - au.Response.Response, err = future.GetResult(sender) - if au.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", nil, "received nil response and error") - } - if err == nil && au.Response.Response.StatusCode != http.StatusNoContent { - au, err = client.GrantAccessResponder(au.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", au.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -745,20 +675,7 @@ func (client DisksClient) RevokeAccessSender(req *http.Request) (future DisksRev var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) 
future.FutureAPI = &azf - future.Result = func(client DisksClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksRevokeAccessFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.DisksRevokeAccessFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -840,30 +757,7 @@ func (client DisksClient) UpdateSender(req *http.Request) (future DisksUpdateFut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DisksClient) (d Disk, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.DisksUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - d.Response.Response, err = future.GetResult(sender) - if d.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && d.Response.Response.StatusCode != http.StatusNoContent { - d, err = client.UpdateResponder(d.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", d.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/enums.go index 93b13fac844..4ff08cdd53b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/enums.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/enums.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
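Reviewer note on the hunks above and below: across the per-operation client files (containerservices.go, disks.go, images.go, and the rest), the inline closure each *Sender function used to assign to future.Result is replaced by `future.Result = future.result`, where `result` is a generated method on the matching *Future type in models.go, and each *Future type also gains an UnmarshalJSON that re-attaches that callback after decoding. The sketch below is not SDK code; every name in it is invented purely to illustrate why the UnmarshalJSON hook matters: a future persisted as JSON and decoded later now comes back with a usable Result instead of a nil function.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// pollingState stands in for the serialisable part of azure.Future.
type pollingState struct {
	PollingURL string `json:"pollingURL"`
}

// fakeClient stands in for a generated client such as DisksClient.
type fakeClient struct{}

// exampleFuture mirrors the generated shape: serialisable polling state plus
// a Result callback that cannot be serialised and must be restored on decode.
type exampleFuture struct {
	pollingState
	Result func(fakeClient) (string, error) `json:"-"`
}

// UnmarshalJSON restores the polling state and re-attaches the default result
// implementation, analogous to the generated UnmarshalJSON methods added in
// models.go for each *Future type.
func (f *exampleFuture) UnmarshalJSON(body []byte) error {
	if err := json.Unmarshal(body, &f.pollingState); err != nil {
		return err
	}
	f.Result = f.result
	return nil
}

// result is the default implementation; the real generated code polls the
// service here, this sketch just echoes the polling URL.
func (f *exampleFuture) result(fakeClient) (string, error) {
	return "completed: " + f.PollingURL, nil
}

func main() {
	// Persist an in-flight operation ...
	raw, _ := json.Marshal(exampleFuture{pollingState: pollingState{PollingURL: "https://example.invalid/poll"}})

	// ... and resume it later. Without the UnmarshalJSON hook a decoded
	// future carries a nil Result; with it, the callback is usable again.
	var resumed exampleFuture
	if err := json.Unmarshal(raw, &resumed); err != nil {
		panic(err)
	}
	out, _ := resumed.Result(fakeClient{})
	fmt.Println(out)
}
```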
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleries.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleries.go index 7083c628cf4..7c738a3c457 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleries.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleries.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -107,30 +96,7 @@ func (client GalleriesClient) CreateOrUpdateSender(req *http.Request) (future Ga var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GalleriesClient) (g Gallery, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.GalleriesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - g.Response.Response, err = future.GetResult(sender) - if g.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && g.Response.Response.StatusCode != http.StatusNoContent { - g, err = client.CreateOrUpdateResponder(g.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", g.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -208,20 +174,7 @@ func (client GalleriesClient) DeleteSender(req *http.Request) (future GalleriesD var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GalleriesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleriesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.GalleriesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleryimages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleryimages.go index aa0a7f8e63b..953838211ee 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleryimages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleryimages.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -123,30 +112,7 @@ func (client GalleryImagesClient) CreateOrUpdateSender(req *http.Request) (futur var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GalleryImagesClient) (gi GalleryImage, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImagesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.GalleryImagesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - gi.Response.Response, err = future.GetResult(sender) - if gi.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImagesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && gi.Response.Response.StatusCode != http.StatusNoContent { - gi, err = client.CreateOrUpdateResponder(gi.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImagesCreateOrUpdateFuture", "Result", gi.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -226,20 +192,7 @@ func (client GalleryImagesClient) DeleteSender(req *http.Request) (future Galler var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GalleryImagesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImagesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.GalleryImagesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleryimageversions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleryimageversions.go index 0c66ba83944..5f327efa544 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleryimageversions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleryimageversions.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -121,30 +110,7 @@ func (client GalleryImageVersionsClient) CreateOrUpdateSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GalleryImageVersionsClient) (giv GalleryImageVersion, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.GalleryImageVersionsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - giv.Response.Response, err = future.GetResult(sender) - if giv.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && giv.Response.Response.StatusCode != http.StatusNoContent { - giv, err = client.CreateOrUpdateResponder(giv.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsCreateOrUpdateFuture", "Result", giv.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -226,20 +192,7 @@ func (client GalleryImageVersionsClient) DeleteSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GalleryImageVersionsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.GalleryImageVersionsDeleteFuture") - return - } - 
ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/images.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/images.go index e0d0e089830..dd8cdf42d61 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/images.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/images.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -106,30 +95,7 @@ func (client ImagesClient) CreateOrUpdateSender(req *http.Request) (future Image var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ImagesClient) (i Image, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.ImagesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.ImagesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - i.Response.Response, err = future.GetResult(sender) - if i.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.ImagesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && i.Response.Response.StatusCode != http.StatusNoContent { - i, err = client.CreateOrUpdateResponder(i.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.ImagesCreateOrUpdateFuture", "Result", i.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -207,20 +173,7 @@ func (client ImagesClient) DeleteSender(req *http.Request) (future ImagesDeleteF var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ImagesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.ImagesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.ImagesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -610,30 +563,7 @@ 
func (client ImagesClient) UpdateSender(req *http.Request) (future ImagesUpdateF var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ImagesClient) (i Image, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.ImagesUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.ImagesUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - i.Response.Response, err = future.GetResult(sender) - if i.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.ImagesUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && i.Response.Response.StatusCode != http.StatusNoContent { - i, err = client.UpdateResponder(i.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.ImagesUpdateFuture", "Result", i.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/loganalytics.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/loganalytics.go index cb6c906657f..79c4adecc34 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/loganalytics.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/loganalytics.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -112,30 +101,7 @@ func (client LogAnalyticsClient) ExportRequestRateByIntervalSender(req *http.Req var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client LogAnalyticsClient) (laor LogAnalyticsOperationResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportRequestRateByIntervalFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.LogAnalyticsExportRequestRateByIntervalFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - laor.Response.Response, err = future.GetResult(sender) - if laor.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportRequestRateByIntervalFuture", "Result", nil, "received nil response and error") - } - if err == nil && laor.Response.Response.StatusCode != http.StatusNoContent { - laor, err = client.ExportRequestRateByIntervalResponder(laor.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportRequestRateByIntervalFuture", "Result", laor.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -221,30 +187,7 @@ func (client LogAnalyticsClient) ExportThrottledRequestsSender(req *http.Request var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client LogAnalyticsClient) (laor LogAnalyticsOperationResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportThrottledRequestsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.LogAnalyticsExportThrottledRequestsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - laor.Response.Response, err = future.GetResult(sender) - if laor.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportThrottledRequestsFuture", "Result", nil, "received nil response and error") - } - if err == nil && laor.Response.Response.StatusCode != http.StatusNoContent { - laor, err = client.ExportThrottledRequestsResponder(laor.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportThrottledRequestsFuture", "Result", laor.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/models.go index af95666d786..a09e6f22187 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/models.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -918,6 +907,39 @@ type ContainerServicesCreateOrUpdateFuture struct { Result func(ContainerServicesClient) (ContainerService, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ContainerServicesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ContainerServicesCreateOrUpdateFuture.Result. +func (future *ContainerServicesCreateOrUpdateFuture) result(client ContainerServicesClient) (cs ContainerService, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.ContainerServicesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if cs.Response.Response, err = future.GetResult(sender); err == nil && cs.Response.Response.StatusCode != http.StatusNoContent { + cs, err = client.CreateOrUpdateResponder(cs.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesCreateOrUpdateFuture", "Result", cs.Response.Response, "Failure responding to request") + } + } + return +} + // ContainerServicesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type ContainerServicesDeleteFuture struct { @@ -927,6 +949,33 @@ type ContainerServicesDeleteFuture struct { Result func(ContainerServicesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ContainerServicesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ContainerServicesDeleteFuture.Result. 
+func (future *ContainerServicesDeleteFuture) result(client ContainerServicesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.ContainerServicesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ContainerServiceServicePrincipalProfile information about a service principal identity for the cluster // to use for manipulating Azure APIs. type ContainerServiceServicePrincipalProfile struct { @@ -1405,6 +1454,39 @@ type DisksCreateOrUpdateFuture struct { Result func(DisksClient) (Disk, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DisksCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DisksCreateOrUpdateFuture.Result. +func (future *DisksCreateOrUpdateFuture) result(client DisksClient) (d Disk, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if d.Response.Response, err = future.GetResult(sender); err == nil && d.Response.Response.StatusCode != http.StatusNoContent { + d, err = client.CreateOrUpdateResponder(d.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", d.Response.Response, "Failure responding to request") + } + } + return +} + // DisksDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. type DisksDeleteFuture struct { azure.FutureAPI @@ -1413,6 +1495,33 @@ type DisksDeleteFuture struct { Result func(DisksClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DisksDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DisksDeleteFuture.Result. +func (future *DisksDeleteFuture) result(client DisksClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // DisksGrantAccessFuture an abstraction for monitoring and retrieving the results of a long-running // operation. 
type DisksGrantAccessFuture struct { @@ -1422,6 +1531,39 @@ type DisksGrantAccessFuture struct { Result func(DisksClient) (AccessURI, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DisksGrantAccessFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DisksGrantAccessFuture.Result. +func (future *DisksGrantAccessFuture) result(client DisksClient) (au AccessURI, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksGrantAccessFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if au.Response.Response, err = future.GetResult(sender); err == nil && au.Response.Response.StatusCode != http.StatusNoContent { + au, err = client.GrantAccessResponder(au.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", au.Response.Response, "Failure responding to request") + } + } + return +} + // DiskSku the disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, or UltraSSD_LRS. type DiskSku struct { // Name - The sku name. Possible values include: 'StandardLRS', 'PremiumLRS', 'StandardSSDLRS', 'UltraSSDLRS' @@ -1448,6 +1590,33 @@ type DisksRevokeAccessFuture struct { Result func(DisksClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DisksRevokeAccessFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DisksRevokeAccessFuture.Result. +func (future *DisksRevokeAccessFuture) result(client DisksClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksRevokeAccessFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksRevokeAccessFuture") + return + } + ar.Response = future.Response() + return +} + // DisksUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. type DisksUpdateFuture struct { azure.FutureAPI @@ -1456,6 +1625,39 @@ type DisksUpdateFuture struct { Result func(DisksClient) (Disk, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DisksUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DisksUpdateFuture.Result. 
+func (future *DisksUpdateFuture) result(client DisksClient) (d Disk, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if d.Response.Response, err = future.GetResult(sender); err == nil && d.Response.Response.StatusCode != http.StatusNoContent { + d, err = client.UpdateResponder(d.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", d.Response.Response, "Failure responding to request") + } + } + return +} + // DiskUpdate disk update resource. type DiskUpdate struct { *DiskUpdateProperties `json:"properties,omitempty"` @@ -1554,6 +1756,39 @@ type GalleriesCreateOrUpdateFuture struct { Result func(GalleriesClient) (Gallery, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleriesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleriesCreateOrUpdateFuture.Result. +func (future *GalleriesCreateOrUpdateFuture) result(client GalleriesClient) (g Gallery, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleriesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if g.Response.Response, err = future.GetResult(sender); err == nil && g.Response.Response.StatusCode != http.StatusNoContent { + g, err = client.CreateOrUpdateResponder(g.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", g.Response.Response, "Failure responding to request") + } + } + return +} + // GalleriesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type GalleriesDeleteFuture struct { @@ -1563,6 +1798,33 @@ type GalleriesDeleteFuture struct { Result func(GalleriesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleriesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleriesDeleteFuture.Result. 
+func (future *GalleriesDeleteFuture) result(client GalleriesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleriesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // Gallery specifies information about the Shared Image Gallery that you want to create or update. type Gallery struct { autorest.Response `json:"-"` @@ -2040,6 +2302,39 @@ type GalleryImagesCreateOrUpdateFuture struct { Result func(GalleryImagesClient) (GalleryImage, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleryImagesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleryImagesCreateOrUpdateFuture.Result. +func (future *GalleryImagesCreateOrUpdateFuture) result(client GalleryImagesClient) (gi GalleryImage, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryImagesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gi.Response.Response, err = future.GetResult(sender); err == nil && gi.Response.Response.StatusCode != http.StatusNoContent { + gi, err = client.CreateOrUpdateResponder(gi.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesCreateOrUpdateFuture", "Result", gi.Response.Response, "Failure responding to request") + } + } + return +} + // GalleryImagesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type GalleryImagesDeleteFuture struct { @@ -2049,6 +2344,33 @@ type GalleryImagesDeleteFuture struct { Result func(GalleryImagesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleryImagesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleryImagesDeleteFuture.Result. +func (future *GalleryImagesDeleteFuture) result(client GalleryImagesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryImagesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // GalleryImageVersion specifies information about the gallery Image Version that you want to create or // update. 
type GalleryImageVersion struct { @@ -2374,6 +2696,39 @@ type GalleryImageVersionsCreateOrUpdateFuture struct { Result func(GalleryImageVersionsClient) (GalleryImageVersion, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleryImageVersionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleryImageVersionsCreateOrUpdateFuture.Result. +func (future *GalleryImageVersionsCreateOrUpdateFuture) result(client GalleryImageVersionsClient) (giv GalleryImageVersion, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryImageVersionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if giv.Response.Response, err = future.GetResult(sender); err == nil && giv.Response.Response.StatusCode != http.StatusNoContent { + giv, err = client.CreateOrUpdateResponder(giv.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsCreateOrUpdateFuture", "Result", giv.Response.Response, "Failure responding to request") + } + } + return +} + // GalleryImageVersionsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type GalleryImageVersionsDeleteFuture struct { @@ -2383,6 +2738,33 @@ type GalleryImageVersionsDeleteFuture struct { Result func(GalleryImageVersionsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleryImageVersionsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleryImageVersionsDeleteFuture.Result. +func (future *GalleryImageVersionsDeleteFuture) result(client GalleryImageVersionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryImageVersionsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // GalleryImageVersionStorageProfile this is the storage profile of a gallery Image Version. type GalleryImageVersionStorageProfile struct { // OsDiskImage - READ-ONLY @@ -2959,6 +3341,39 @@ type ImagesCreateOrUpdateFuture struct { Result func(ImagesClient) (Image, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *ImagesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ImagesCreateOrUpdateFuture.Result. +func (future *ImagesCreateOrUpdateFuture) result(client ImagesClient) (i Image, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.ImagesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if i.Response.Response, err = future.GetResult(sender); err == nil && i.Response.Response.StatusCode != http.StatusNoContent { + i, err = client.CreateOrUpdateResponder(i.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesCreateOrUpdateFuture", "Result", i.Response.Response, "Failure responding to request") + } + } + return +} + // ImagesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. type ImagesDeleteFuture struct { azure.FutureAPI @@ -2967,6 +3382,33 @@ type ImagesDeleteFuture struct { Result func(ImagesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ImagesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ImagesDeleteFuture.Result. +func (future *ImagesDeleteFuture) result(client ImagesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.ImagesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ImageStorageProfile describes a storage profile. type ImageStorageProfile struct { // OsDisk - Specifies information about the operating system disk used by the virtual machine.
For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). @@ -2985,6 +3427,39 @@ type ImagesUpdateFuture struct { Result func(ImagesClient) (Image, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ImagesUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ImagesUpdateFuture.Result. +func (future *ImagesUpdateFuture) result(client ImagesClient) (i Image, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.ImagesUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if i.Response.Response, err = future.GetResult(sender); err == nil && i.Response.Response.StatusCode != http.StatusNoContent { + i, err = client.UpdateResponder(i.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesUpdateFuture", "Result", i.Response.Response, "Failure responding to request") + } + } + return +} + // ImageUpdate the source user image virtual hard disk. Only tags may be updated. type ImageUpdate struct { *ImageProperties `json:"properties,omitempty"` @@ -3286,6 +3761,39 @@ type LogAnalyticsExportRequestRateByIntervalFuture struct { Result func(LogAnalyticsClient) (LogAnalyticsOperationResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *LogAnalyticsExportRequestRateByIntervalFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for LogAnalyticsExportRequestRateByIntervalFuture.Result. 
+func (future *LogAnalyticsExportRequestRateByIntervalFuture) result(client LogAnalyticsClient) (laor LogAnalyticsOperationResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportRequestRateByIntervalFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.LogAnalyticsExportRequestRateByIntervalFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if laor.Response.Response, err = future.GetResult(sender); err == nil && laor.Response.Response.StatusCode != http.StatusNoContent { + laor, err = client.ExportRequestRateByIntervalResponder(laor.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportRequestRateByIntervalFuture", "Result", laor.Response.Response, "Failure responding to request") + } + } + return +} + // LogAnalyticsExportThrottledRequestsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type LogAnalyticsExportThrottledRequestsFuture struct { @@ -3295,6 +3803,39 @@ type LogAnalyticsExportThrottledRequestsFuture struct { Result func(LogAnalyticsClient) (LogAnalyticsOperationResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *LogAnalyticsExportThrottledRequestsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for LogAnalyticsExportThrottledRequestsFuture.Result. +func (future *LogAnalyticsExportThrottledRequestsFuture) result(client LogAnalyticsClient) (laor LogAnalyticsOperationResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportThrottledRequestsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.LogAnalyticsExportThrottledRequestsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if laor.Response.Response, err = future.GetResult(sender); err == nil && laor.Response.Response.StatusCode != http.StatusNoContent { + laor, err = client.ExportThrottledRequestsResponder(laor.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportThrottledRequestsFuture", "Result", laor.Response.Response, "Failure responding to request") + } + } + return +} + // LogAnalyticsInputBase api input base class for LogAnalytics Api. type LogAnalyticsInputBase struct { // BlobContainerSasURI - SAS Uri of the logging blob container to which LogAnalytics Api writes output logs to. @@ -4937,6 +5478,39 @@ type SnapshotsCreateOrUpdateFuture struct { Result func(SnapshotsClient) (Snapshot, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *SnapshotsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SnapshotsCreateOrUpdateFuture.Result. +func (future *SnapshotsCreateOrUpdateFuture) result(client SnapshotsClient) (s Snapshot, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.SnapshotsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if s.Response.Response, err = future.GetResult(sender); err == nil && s.Response.Response.StatusCode != http.StatusNoContent { + s, err = client.CreateOrUpdateResponder(s.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsCreateOrUpdateFuture", "Result", s.Response.Response, "Failure responding to request") + } + } + return +} + // SnapshotsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type SnapshotsDeleteFuture struct { @@ -4946,6 +5520,33 @@ type SnapshotsDeleteFuture struct { Result func(SnapshotsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SnapshotsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SnapshotsDeleteFuture.Result. +func (future *SnapshotsDeleteFuture) result(client SnapshotsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.SnapshotsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // SnapshotsGrantAccessFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type SnapshotsGrantAccessFuture struct { @@ -4955,6 +5556,39 @@ type SnapshotsGrantAccessFuture struct { Result func(SnapshotsClient) (AccessURI, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SnapshotsGrantAccessFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SnapshotsGrantAccessFuture.Result. 
+func (future *SnapshotsGrantAccessFuture) result(client SnapshotsClient) (au AccessURI, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsGrantAccessFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.SnapshotsGrantAccessFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if au.Response.Response, err = future.GetResult(sender); err == nil && au.Response.Response.StatusCode != http.StatusNoContent { + au, err = client.GrantAccessResponder(au.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsGrantAccessFuture", "Result", au.Response.Response, "Failure responding to request") + } + } + return +} + // SnapshotSku the snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS. type SnapshotSku struct { // Name - The sku name. Possible values include: 'SnapshotStorageAccountTypesStandardLRS', 'SnapshotStorageAccountTypesPremiumLRS', 'SnapshotStorageAccountTypesStandardZRS' @@ -4981,15 +5615,75 @@ type SnapshotsRevokeAccessFuture struct { Result func(SnapshotsClient) (autorest.Response, error) } -// SnapshotsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type SnapshotsUpdateFuture struct { - azure.FutureAPI +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SnapshotsRevokeAccessFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SnapshotsRevokeAccessFuture.Result. +func (future *SnapshotsRevokeAccessFuture) result(client SnapshotsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsRevokeAccessFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.SnapshotsRevokeAccessFuture") + return + } + ar.Response = future.Response() + return +} + +// SnapshotsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type SnapshotsUpdateFuture struct { + azure.FutureAPI // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. Result func(SnapshotsClient) (Snapshot, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SnapshotsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SnapshotsUpdateFuture.Result. 
+func (future *SnapshotsUpdateFuture) result(client SnapshotsClient) (s Snapshot, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.SnapshotsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if s.Response.Response, err = future.GetResult(sender); err == nil && s.Response.Response.StatusCode != http.StatusNoContent { + s, err = client.UpdateResponder(s.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsUpdateFuture", "Result", s.Response.Response, "Failure responding to request") + } + } + return +} + // SnapshotUpdate snapshot update resource. type SnapshotUpdate struct { *SnapshotUpdateProperties `json:"properties,omitempty"` @@ -5734,6 +6428,39 @@ type VirtualMachineExtensionsCreateOrUpdateFuture struct { Result func(VirtualMachineExtensionsClient) (VirtualMachineExtension, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineExtensionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineExtensionsCreateOrUpdateFuture.Result. +func (future *VirtualMachineExtensionsCreateOrUpdateFuture) result(client VirtualMachineExtensionsClient) (vme VirtualMachineExtension, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineExtensionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vme.Response.Response, err = future.GetResult(sender); err == nil && vme.Response.Response.StatusCode != http.StatusNoContent { + vme, err = client.CreateOrUpdateResponder(vme.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsCreateOrUpdateFuture", "Result", vme.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachineExtensionsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineExtensionsDeleteFuture struct { @@ -5743,6 +6470,33 @@ type VirtualMachineExtensionsDeleteFuture struct { Result func(VirtualMachineExtensionsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineExtensionsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineExtensionsDeleteFuture.Result. 
+func (future *VirtualMachineExtensionsDeleteFuture) result(client VirtualMachineExtensionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineExtensionsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineExtensionsListResult the List Extension operation response type VirtualMachineExtensionsListResult struct { autorest.Response `json:"-"` @@ -5759,6 +6513,39 @@ type VirtualMachineExtensionsUpdateFuture struct { Result func(VirtualMachineExtensionsClient) (VirtualMachineExtension, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineExtensionsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineExtensionsUpdateFuture.Result. +func (future *VirtualMachineExtensionsUpdateFuture) result(client VirtualMachineExtensionsClient) (vme VirtualMachineExtension, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineExtensionsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vme.Response.Response, err = future.GetResult(sender); err == nil && vme.Response.Response.StatusCode != http.StatusNoContent { + vme, err = client.UpdateResponder(vme.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsUpdateFuture", "Result", vme.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachineExtensionUpdate describes a Virtual Machine Extension. type VirtualMachineExtensionUpdate struct { *VirtualMachineExtensionUpdateProperties `json:"properties,omitempty"` @@ -6722,6 +7509,39 @@ type VirtualMachineScaleSetExtensionsCreateOrUpdateFuture struct { Result func(VirtualMachineScaleSetExtensionsClient) (VirtualMachineScaleSetExtension, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetExtensionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetExtensionsCreateOrUpdateFuture.Result. 
+func (future *VirtualMachineScaleSetExtensionsCreateOrUpdateFuture) result(client VirtualMachineScaleSetExtensionsClient) (vmsse VirtualMachineScaleSetExtension, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vmsse.Response.Response, err = future.GetResult(sender); err == nil && vmsse.Response.Response.StatusCode != http.StatusNoContent { + vmsse, err = client.CreateOrUpdateResponder(vmsse.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture", "Result", vmsse.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachineScaleSetExtensionsDeleteFuture an abstraction for monitoring and retrieving the results of // a long-running operation. type VirtualMachineScaleSetExtensionsDeleteFuture struct { @@ -6731,6 +7551,33 @@ type VirtualMachineScaleSetExtensionsDeleteFuture struct { Result func(VirtualMachineScaleSetExtensionsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetExtensionsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetExtensionsDeleteFuture.Result. +func (future *VirtualMachineScaleSetExtensionsDeleteFuture) result(client VirtualMachineScaleSetExtensionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetExtensionsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetIdentity identity for the virtual machine scale set. type VirtualMachineScaleSetIdentity struct { // PrincipalID - READ-ONLY; The principal id of virtual machine scale set identity. This property will only be provided for a system assigned identity. @@ -7824,6 +8671,33 @@ type VirtualMachineScaleSetRollingUpgradesCancelFuture struct { Result func(VirtualMachineScaleSetRollingUpgradesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetRollingUpgradesCancelFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetRollingUpgradesCancelFuture.Result. 
+func (future *VirtualMachineScaleSetRollingUpgradesCancelFuture) result(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesCancelFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetRollingUpgradesCancelFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture an abstraction for monitoring and // retrieving the results of a long-running operation. type VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture struct { @@ -7833,6 +8707,33 @@ type VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture struct { Result func(VirtualMachineScaleSetRollingUpgradesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture.Result. +func (future *VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture) result(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture an abstraction for monitoring and retrieving // the results of a long-running operation. type VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture struct { @@ -7842,6 +8743,33 @@ type VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture struct { Result func(VirtualMachineScaleSetRollingUpgradesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture.Result. 
+func (future *VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture) result(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of // a long-running operation. type VirtualMachineScaleSetsCreateOrUpdateFuture struct { @@ -7851,6 +8779,39 @@ type VirtualMachineScaleSetsCreateOrUpdateFuture struct { Result func(VirtualMachineScaleSetsClient) (VirtualMachineScaleSet, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsCreateOrUpdateFuture.Result. +func (future *VirtualMachineScaleSetsCreateOrUpdateFuture) result(client VirtualMachineScaleSetsClient) (vmss VirtualMachineScaleSet, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vmss.Response.Response, err = future.GetResult(sender); err == nil && vmss.Response.Response.StatusCode != http.StatusNoContent { + vmss, err = client.CreateOrUpdateResponder(vmss.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsCreateOrUpdateFuture", "Result", vmss.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachineScaleSetsDeallocateFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetsDeallocateFuture struct { @@ -7860,6 +8821,33 @@ type VirtualMachineScaleSetsDeallocateFuture struct { Result func(VirtualMachineScaleSetsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsDeallocateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsDeallocateFuture.Result. 
+func (future *VirtualMachineScaleSetsDeallocateFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsDeallocateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsDeallocateFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetsDeleteFuture struct { @@ -7869,6 +8857,33 @@ type VirtualMachineScaleSetsDeleteFuture struct { Result func(VirtualMachineScaleSetsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsDeleteFuture.Result. +func (future *VirtualMachineScaleSetsDeleteFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetsDeleteInstancesFuture an abstraction for monitoring and retrieving the results of // a long-running operation. type VirtualMachineScaleSetsDeleteInstancesFuture struct { @@ -7878,6 +8893,33 @@ type VirtualMachineScaleSetsDeleteInstancesFuture struct { Result func(VirtualMachineScaleSetsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsDeleteInstancesFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsDeleteInstancesFuture.Result. +func (future *VirtualMachineScaleSetsDeleteInstancesFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsDeleteInstancesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsDeleteInstancesFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetSku describes an available virtual machine scale set sku. type VirtualMachineScaleSetSku struct { // ResourceType - READ-ONLY; The type of resource the sku applies to. 
@@ -7909,6 +8951,33 @@ type VirtualMachineScaleSetsPerformMaintenanceFuture struct { Result func(VirtualMachineScaleSetsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsPerformMaintenanceFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsPerformMaintenanceFuture.Result. +func (future *VirtualMachineScaleSetsPerformMaintenanceFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsPerformMaintenanceFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsPerformMaintenanceFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetsPowerOffFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetsPowerOffFuture struct { @@ -7918,6 +8987,33 @@ type VirtualMachineScaleSetsPowerOffFuture struct { Result func(VirtualMachineScaleSetsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsPowerOffFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsPowerOffFuture.Result. +func (future *VirtualMachineScaleSetsPowerOffFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsPowerOffFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsPowerOffFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetsRedeployFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetsRedeployFuture struct { @@ -7927,6 +9023,33 @@ type VirtualMachineScaleSetsRedeployFuture struct { Result func(VirtualMachineScaleSetsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsRedeployFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsRedeployFuture.Result. 
+func (future *VirtualMachineScaleSetsRedeployFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsRedeployFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsRedeployFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetsReimageAllFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetsReimageAllFuture struct { @@ -7936,6 +9059,33 @@ type VirtualMachineScaleSetsReimageAllFuture struct { Result func(VirtualMachineScaleSetsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsReimageAllFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsReimageAllFuture.Result. +func (future *VirtualMachineScaleSetsReimageAllFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsReimageAllFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsReimageAllFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetsReimageFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetsReimageFuture struct { @@ -7945,6 +9095,33 @@ type VirtualMachineScaleSetsReimageFuture struct { Result func(VirtualMachineScaleSetsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsReimageFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsReimageFuture.Result. +func (future *VirtualMachineScaleSetsReimageFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsReimageFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsReimageFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetsRestartFuture an abstraction for monitoring and retrieving the results of a // long-running operation. 
type VirtualMachineScaleSetsRestartFuture struct { @@ -7954,6 +9131,33 @@ type VirtualMachineScaleSetsRestartFuture struct { Result func(VirtualMachineScaleSetsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsRestartFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsRestartFuture.Result. +func (future *VirtualMachineScaleSetsRestartFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsRestartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsRestartFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetsStartFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetsStartFuture struct { @@ -7963,6 +9167,33 @@ type VirtualMachineScaleSetsStartFuture struct { Result func(VirtualMachineScaleSetsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsStartFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsStartFuture.Result. +func (future *VirtualMachineScaleSetsStartFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsStartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsStartFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetStorageProfile describes a virtual machine scale set storage profile. type VirtualMachineScaleSetStorageProfile struct { // ImageReference - Specifies information about the image to use. You can specify information about platform images, marketplace images, or virtual machine images. This element is required when you want to use a platform image, marketplace image, or virtual machine image, but is not used in other creation operations. @@ -7982,6 +9213,39 @@ type VirtualMachineScaleSetsUpdateFuture struct { Result func(VirtualMachineScaleSetsClient) (VirtualMachineScaleSet, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsUpdateFuture.Result. 
+func (future *VirtualMachineScaleSetsUpdateFuture) result(client VirtualMachineScaleSetsClient) (vmss VirtualMachineScaleSet, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vmss.Response.Response, err = future.GetResult(sender); err == nil && vmss.Response.Response.StatusCode != http.StatusNoContent { + vmss, err = client.UpdateResponder(vmss.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsUpdateFuture", "Result", vmss.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachineScaleSetsUpdateInstancesFuture an abstraction for monitoring and retrieving the results of // a long-running operation. type VirtualMachineScaleSetsUpdateInstancesFuture struct { @@ -7991,6 +9255,33 @@ type VirtualMachineScaleSetsUpdateInstancesFuture struct { Result func(VirtualMachineScaleSetsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsUpdateInstancesFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsUpdateInstancesFuture.Result. +func (future *VirtualMachineScaleSetsUpdateInstancesFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsUpdateInstancesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsUpdateInstancesFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetUpdate describes a Virtual Machine Scale Set. type VirtualMachineScaleSetUpdate struct { // Sku - The virtual machine scale set sku. @@ -8900,6 +10191,33 @@ type VirtualMachineScaleSetVMsDeallocateFuture struct { Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMsDeallocateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMsDeallocateFuture.Result. 
+func (future *VirtualMachineScaleSetVMsDeallocateFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsDeallocateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsDeallocateFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetVMsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetVMsDeleteFuture struct { @@ -8909,6 +10227,33 @@ type VirtualMachineScaleSetVMsDeleteFuture struct { Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMsDeleteFuture.Result. +func (future *VirtualMachineScaleSetVMsDeleteFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetVMsPerformMaintenanceFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type VirtualMachineScaleSetVMsPerformMaintenanceFuture struct { @@ -8918,6 +10263,33 @@ type VirtualMachineScaleSetVMsPerformMaintenanceFuture struct { Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMsPerformMaintenanceFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMsPerformMaintenanceFuture.Result. +func (future *VirtualMachineScaleSetVMsPerformMaintenanceFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsPerformMaintenanceFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsPerformMaintenanceFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetVMsPowerOffFuture an abstraction for monitoring and retrieving the results of a // long-running operation. 
type VirtualMachineScaleSetVMsPowerOffFuture struct { @@ -8927,6 +10299,33 @@ type VirtualMachineScaleSetVMsPowerOffFuture struct { Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMsPowerOffFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMsPowerOffFuture.Result. +func (future *VirtualMachineScaleSetVMsPowerOffFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsPowerOffFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsPowerOffFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetVMsRedeployFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetVMsRedeployFuture struct { @@ -8936,6 +10335,33 @@ type VirtualMachineScaleSetVMsRedeployFuture struct { Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMsRedeployFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMsRedeployFuture.Result. +func (future *VirtualMachineScaleSetVMsRedeployFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRedeployFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsRedeployFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetVMsReimageAllFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetVMsReimageAllFuture struct { @@ -8945,6 +10371,33 @@ type VirtualMachineScaleSetVMsReimageAllFuture struct { Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMsReimageAllFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMsReimageAllFuture.Result. 
+func (future *VirtualMachineScaleSetVMsReimageAllFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsReimageAllFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsReimageAllFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetVMsReimageFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetVMsReimageFuture struct { @@ -8954,6 +10407,33 @@ type VirtualMachineScaleSetVMsReimageFuture struct { Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMsReimageFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMsReimageFuture.Result. +func (future *VirtualMachineScaleSetVMsReimageFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsReimageFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsReimageFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetVMsRestartFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetVMsRestartFuture struct { @@ -8963,6 +10443,33 @@ type VirtualMachineScaleSetVMsRestartFuture struct { Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMsRestartFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMsRestartFuture.Result. +func (future *VirtualMachineScaleSetVMsRestartFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRestartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsRestartFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetVMsRunCommandFuture an abstraction for monitoring and retrieving the results of a // long-running operation. 
type VirtualMachineScaleSetVMsRunCommandFuture struct { @@ -8972,6 +10479,39 @@ type VirtualMachineScaleSetVMsRunCommandFuture struct { Result func(VirtualMachineScaleSetVMsClient) (RunCommandResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMsRunCommandFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMsRunCommandFuture.Result. +func (future *VirtualMachineScaleSetVMsRunCommandFuture) result(client VirtualMachineScaleSetVMsClient) (rcr RunCommandResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRunCommandFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsRunCommandFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if rcr.Response.Response, err = future.GetResult(sender); err == nil && rcr.Response.Response.StatusCode != http.StatusNoContent { + rcr, err = client.RunCommandResponder(rcr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRunCommandFuture", "Result", rcr.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachineScaleSetVMsStartFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetVMsStartFuture struct { @@ -8981,6 +10521,33 @@ type VirtualMachineScaleSetVMsStartFuture struct { Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMsStartFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMsStartFuture.Result. +func (future *VirtualMachineScaleSetVMsStartFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsStartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsStartFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetVMsUpdateFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetVMsUpdateFuture struct { @@ -8990,6 +10557,39 @@ type VirtualMachineScaleSetVMsUpdateFuture struct { Result func(VirtualMachineScaleSetVMsClient) (VirtualMachineScaleSetVM, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *VirtualMachineScaleSetVMsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMsUpdateFuture.Result. +func (future *VirtualMachineScaleSetVMsUpdateFuture) result(client VirtualMachineScaleSetVMsClient) (vmssv VirtualMachineScaleSetVM, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vmssv.Response.Response, err = future.GetResult(sender); err == nil && vmssv.Response.Response.StatusCode != http.StatusNoContent { + vmssv, err = client.UpdateResponder(vmssv.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsUpdateFuture", "Result", vmssv.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachinesCaptureFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VirtualMachinesCaptureFuture struct { @@ -8999,6 +10599,39 @@ type VirtualMachinesCaptureFuture struct { Result func(VirtualMachinesClient) (VirtualMachineCaptureResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachinesCaptureFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesCaptureFuture.Result. +func (future *VirtualMachinesCaptureFuture) result(client VirtualMachinesClient) (vmcr VirtualMachineCaptureResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCaptureFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesCaptureFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vmcr.Response.Response, err = future.GetResult(sender); err == nil && vmcr.Response.Response.StatusCode != http.StatusNoContent { + vmcr, err = client.CaptureResponder(vmcr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCaptureFuture", "Result", vmcr.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachinesConvertToManagedDisksFuture an abstraction for monitoring and retrieving the results of a // long-running operation. 
type VirtualMachinesConvertToManagedDisksFuture struct { @@ -9008,6 +10641,33 @@ type VirtualMachinesConvertToManagedDisksFuture struct { Result func(VirtualMachinesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachinesConvertToManagedDisksFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesConvertToManagedDisksFuture.Result. +func (future *VirtualMachinesConvertToManagedDisksFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesConvertToManagedDisksFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesConvertToManagedDisksFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachinesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachinesCreateOrUpdateFuture struct { @@ -9017,6 +10677,39 @@ type VirtualMachinesCreateOrUpdateFuture struct { Result func(VirtualMachinesClient) (VirtualMachine, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachinesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesCreateOrUpdateFuture.Result. +func (future *VirtualMachinesCreateOrUpdateFuture) result(client VirtualMachinesClient) (VM VirtualMachine, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if VM.Response.Response, err = future.GetResult(sender); err == nil && VM.Response.Response.StatusCode != http.StatusNoContent { + VM, err = client.CreateOrUpdateResponder(VM.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCreateOrUpdateFuture", "Result", VM.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachinesDeallocateFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachinesDeallocateFuture struct { @@ -9026,6 +10719,33 @@ type VirtualMachinesDeallocateFuture struct { Result func(VirtualMachinesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *VirtualMachinesDeallocateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesDeallocateFuture.Result. +func (future *VirtualMachinesDeallocateFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesDeallocateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesDeallocateFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachinesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VirtualMachinesDeleteFuture struct { @@ -9035,6 +10755,33 @@ type VirtualMachinesDeleteFuture struct { Result func(VirtualMachinesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachinesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesDeleteFuture.Result. +func (future *VirtualMachinesDeleteFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineSize describes the properties of a VM size. type VirtualMachineSize struct { // Name - The name of the virtual machine size. @@ -9067,6 +10814,33 @@ type VirtualMachinesPerformMaintenanceFuture struct { Result func(VirtualMachinesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachinesPerformMaintenanceFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesPerformMaintenanceFuture.Result. +func (future *VirtualMachinesPerformMaintenanceFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesPerformMaintenanceFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesPerformMaintenanceFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachinesPowerOffFuture an abstraction for monitoring and retrieving the results of a long-running // operation. 
type VirtualMachinesPowerOffFuture struct { @@ -9076,6 +10850,33 @@ type VirtualMachinesPowerOffFuture struct { Result func(VirtualMachinesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachinesPowerOffFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesPowerOffFuture.Result. +func (future *VirtualMachinesPowerOffFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesPowerOffFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesPowerOffFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachinesRedeployFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VirtualMachinesRedeployFuture struct { @@ -9085,6 +10886,33 @@ type VirtualMachinesRedeployFuture struct { Result func(VirtualMachinesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachinesRedeployFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesRedeployFuture.Result. +func (future *VirtualMachinesRedeployFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRedeployFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesRedeployFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachinesReimageFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VirtualMachinesReimageFuture struct { @@ -9094,6 +10922,33 @@ type VirtualMachinesReimageFuture struct { Result func(VirtualMachinesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachinesReimageFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesReimageFuture.Result. 
+func (future *VirtualMachinesReimageFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesReimageFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesReimageFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachinesRestartFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VirtualMachinesRestartFuture struct { @@ -9103,6 +10958,33 @@ type VirtualMachinesRestartFuture struct { Result func(VirtualMachinesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachinesRestartFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesRestartFuture.Result. +func (future *VirtualMachinesRestartFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRestartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesRestartFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachinesRunCommandFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachinesRunCommandFuture struct { @@ -9112,6 +10994,39 @@ type VirtualMachinesRunCommandFuture struct { Result func(VirtualMachinesClient) (RunCommandResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachinesRunCommandFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesRunCommandFuture.Result. 
+func (future *VirtualMachinesRunCommandFuture) result(client VirtualMachinesClient) (rcr RunCommandResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRunCommandFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesRunCommandFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if rcr.Response.Response, err = future.GetResult(sender); err == nil && rcr.Response.Response.StatusCode != http.StatusNoContent { + rcr, err = client.RunCommandResponder(rcr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRunCommandFuture", "Result", rcr.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachinesStartFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VirtualMachinesStartFuture struct { @@ -9121,6 +11036,33 @@ type VirtualMachinesStartFuture struct { Result func(VirtualMachinesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachinesStartFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesStartFuture.Result. +func (future *VirtualMachinesStartFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesStartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesStartFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineStatusCodeCount the status code and count of the virtual machine scale set instance view // status summary. type VirtualMachineStatusCodeCount struct { @@ -9139,6 +11081,39 @@ type VirtualMachinesUpdateFuture struct { Result func(VirtualMachinesClient) (VirtualMachine, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachinesUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesUpdateFuture.Result. 
+func (future *VirtualMachinesUpdateFuture) result(client VirtualMachinesClient) (VM VirtualMachine, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if VM.Response.Response, err = future.GetResult(sender); err == nil && VM.Response.Response.StatusCode != http.StatusNoContent { + VM, err = client.UpdateResponder(VM.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesUpdateFuture", "Result", VM.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachineUpdate describes a Virtual Machine Update. type VirtualMachineUpdate struct { // Plan - Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/operations.go index ecd3c13f4f8..72f4ddf5214 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/operations.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/proximityplacementgroups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/proximityplacementgroups.go index b4096fa1a69..2a495e27e7f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/proximityplacementgroups.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/proximityplacementgroups.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/resourceskus.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/resourceskus.go index ba9243e02ad..f42e100fce0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/resourceskus.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/resourceskus.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/snapshots.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/snapshots.go index 28bb94d3676..6ebed25730a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/snapshots.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/snapshots.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -130,30 +119,7 @@ func (client SnapshotsClient) CreateOrUpdateSender(req *http.Request) (future Sn var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SnapshotsClient) (s Snapshot, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.SnapshotsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - s.Response.Response, err = future.GetResult(sender) - if s.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && s.Response.Response.StatusCode != http.StatusNoContent { - s, err = client.CreateOrUpdateResponder(s.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsCreateOrUpdateFuture", "Result", s.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -232,20 +198,7 @@ func (client SnapshotsClient) DeleteSender(req *http.Request) (future SnapshotsD var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SnapshotsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.SnapshotsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -409,30 +362,7 @@ func (client SnapshotsClient) GrantAccessSender(req *http.Request) (future Snaps var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SnapshotsClient) (au AccessURI, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsGrantAccessFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.SnapshotsGrantAccessFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - au.Response.Response, err = future.GetResult(sender) - if au.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsGrantAccessFuture", "Result", nil, "received nil response and error") - } - if err == nil && au.Response.Response.StatusCode != http.StatusNoContent { - au, err = client.GrantAccessResponder(au.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsGrantAccessFuture", "Result", au.Response.Response, "Failure 
responding to request") - } - } - return - } + future.Result = future.result return } @@ -740,20 +670,7 @@ func (client SnapshotsClient) RevokeAccessSender(req *http.Request) (future Snap var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SnapshotsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsRevokeAccessFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.SnapshotsRevokeAccessFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -834,30 +751,7 @@ func (client SnapshotsClient) UpdateSender(req *http.Request) (future SnapshotsU var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SnapshotsClient) (s Snapshot, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.SnapshotsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - s.Response.Response, err = future.GetResult(sender) - if s.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && s.Response.Response.StatusCode != http.StatusNoContent { - s, err = client.UpdateResponder(s.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsUpdateFuture", "Result", s.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/usage.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/usage.go index 9872fc35a9f..5da784bce92 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/usage.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/usage.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
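Reviewer note (not part of the diff): across the vendored compute package, each hand-rolled future.Result closure is replaced by a generated result method, and an UnmarshalJSON hook now re-wires FutureAPI and Result when a future is deserialized. A minimal caller-side sketch of the resulting API, assuming the usual track-1 client setup; the resource group, VM name and environment-based authorizer are placeholders, not taken from this change:

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

// createVM issues a long-running CreateOrUpdate, waits for it to finish, and
// then calls the future's Result function, which this update turns into a
// field populated by the generated result method.
func createVM(ctx context.Context, subscriptionID string, params compute.VirtualMachine) (compute.VirtualMachine, error) {
	client := compute.NewVirtualMachinesClient(subscriptionID)
	authorizer, err := auth.NewAuthorizerFromEnvironment() // assumed credential flow
	if err != nil {
		return compute.VirtualMachine{}, err
	}
	client.Authorizer = authorizer

	future, err := client.CreateOrUpdate(ctx, "example-rg", "example-vm", params)
	if err != nil {
		return compute.VirtualMachine{}, err
	}
	// Poll until the asynchronous operation completes.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return compute.VirtualMachine{}, err
	}
	return future.Result(client)
}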
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/version.go index 8fa5eccbca6..986a65475c8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/version.go @@ -2,19 +2,8 @@ package compute import "github.com/Azure/azure-sdk-for-go/version" -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineextensionimages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineextensionimages.go index f0f0d077607..d389b76c3c2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineextensionimages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineextensionimages.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineextensions.go index 57af74f7150..42b9f0ac108 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineextensions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineextensions.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. 
All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -109,30 +98,7 @@ func (client VirtualMachineExtensionsClient) CreateOrUpdateSender(req *http.Requ var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineExtensionsClient) (vme VirtualMachineExtension, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineExtensionsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vme.Response.Response, err = future.GetResult(sender) - if vme.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vme.Response.Response.StatusCode != http.StatusNoContent { - vme, err = client.CreateOrUpdateResponder(vme.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsCreateOrUpdateFuture", "Result", vme.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -212,20 +178,7 @@ func (client VirtualMachineExtensionsClient) DeleteSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineExtensionsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineExtensionsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -469,30 +422,7 @@ func (client VirtualMachineExtensionsClient) UpdateSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineExtensionsClient) (vme VirtualMachineExtension, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, 
"compute.VirtualMachineExtensionsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineExtensionsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vme.Response.Response, err = future.GetResult(sender) - if vme.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vme.Response.Response.StatusCode != http.StatusNoContent { - vme, err = client.UpdateResponder(vme.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsUpdateFuture", "Result", vme.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineimages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineimages.go index 6e027eb368a..59450d69d05 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineimages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineimages.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineruncommands.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineruncommands.go index 851d0869ea0..0eb5fa41407 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineruncommands.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineruncommands.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachines.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachines.go index dd1e3784fcd..d22c47019b6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachines.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachines.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
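Reviewer note (not part of the diff): the practical effect of the generated UnmarshalJSON methods above is that a future round-tripped through JSON (for example, one persisted while the operation is still running) comes back with a working Result, which previously had to be rebuilt by hand. A hedged sketch of that round trip; the stored bytes and the calling code are stand-ins, not code from this repository:

package example

import (
	"context"
	"encoding/json"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
)

// resumeDelete restores a VirtualMachinesDeleteFuture that was serialized earlier.
// The generated UnmarshalJSON re-attaches both the embedded FutureAPI and the
// Result function, so polling can resume in a different process.
func resumeDelete(ctx context.Context, client compute.VirtualMachinesClient, stored []byte) error {
	var future compute.VirtualMachinesDeleteFuture
	if err := json.Unmarshal(stored, &future); err != nil {
		return err
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return err
	}
	_, err := future.Result(client) // delete returns only an autorest.Response
	return err
}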
@@ -116,30 +105,7 @@ func (client VirtualMachinesClient) CaptureSender(req *http.Request) (future Vir var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (vmcr VirtualMachineCaptureResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCaptureFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesCaptureFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vmcr.Response.Response, err = future.GetResult(sender) - if vmcr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCaptureFuture", "Result", nil, "received nil response and error") - } - if err == nil && vmcr.Response.Response.StatusCode != http.StatusNoContent { - vmcr, err = client.CaptureResponder(vmcr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCaptureFuture", "Result", vmcr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -218,20 +184,7 @@ func (client VirtualMachinesClient) ConvertToManagedDisksSender(req *http.Reques var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesConvertToManagedDisksFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesConvertToManagedDisksFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -333,30 +286,7 @@ func (client VirtualMachinesClient) CreateOrUpdateSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (VM VirtualMachine, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - VM.Response.Response, err = future.GetResult(sender) - if VM.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && VM.Response.Response.StatusCode != http.StatusNoContent { - VM, err = client.CreateOrUpdateResponder(VM.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCreateOrUpdateFuture", "Result", VM.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result 
return } @@ -435,20 +365,7 @@ func (client VirtualMachinesClient) DeallocateSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesDeallocateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesDeallocateFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -525,20 +442,7 @@ func (client VirtualMachinesClient) DeleteSender(req *http.Request) (future Virt var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1275,20 +1179,7 @@ func (client VirtualMachinesClient) PerformMaintenanceSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesPerformMaintenanceFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesPerformMaintenanceFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1366,20 +1257,7 @@ func (client VirtualMachinesClient) PowerOffSender(req *http.Request) (future Vi var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesPowerOffFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesPowerOffFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1456,20 +1334,7 @@ func (client VirtualMachinesClient) RedeploySender(req *http.Request) (future Vi var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRedeployFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesRedeployFuture") - return - } - ar.Response 
= future.Response() - return - } + future.Result = future.result return } @@ -1552,20 +1417,7 @@ func (client VirtualMachinesClient) ReimageSender(req *http.Request) (future Vir var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesReimageFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesReimageFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1642,20 +1494,7 @@ func (client VirtualMachinesClient) RestartSender(req *http.Request) (future Vir var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRestartFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesRestartFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1741,30 +1580,7 @@ func (client VirtualMachinesClient) RunCommandSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (rcr RunCommandResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRunCommandFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesRunCommandFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - rcr.Response.Response, err = future.GetResult(sender) - if rcr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRunCommandFuture", "Result", nil, "received nil response and error") - } - if err == nil && rcr.Response.Response.StatusCode != http.StatusNoContent { - rcr, err = client.RunCommandResponder(rcr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRunCommandFuture", "Result", rcr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1842,20 +1658,7 @@ func (client VirtualMachinesClient) StartSender(req *http.Request) (future Virtu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesStartFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesStartFuture") - return - } - ar.Response 
= future.Response() - return - } + future.Result = future.result return } @@ -1935,30 +1738,7 @@ func (client VirtualMachinesClient) UpdateSender(req *http.Request) (future Virt var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (VM VirtualMachine, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - VM.Response.Response, err = future.GetResult(sender) - if VM.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && VM.Response.Response.StatusCode != http.StatusNoContent { - VM, err = client.UpdateResponder(VM.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesUpdateFuture", "Result", VM.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetextensions.go index cf640fdfbb8..dd603701443 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetextensions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetextensions.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -109,30 +98,7 @@ func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdateSender(req *h var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetExtensionsClient) (vmsse VirtualMachineScaleSetExtension, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vmsse.Response.Response, err = future.GetResult(sender) - if vmsse.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vmsse.Response.Response.StatusCode != http.StatusNoContent { - vmsse, err = client.CreateOrUpdateResponder(vmsse.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture", "Result", vmsse.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -212,20 +178,7 @@ func (client VirtualMachineScaleSetExtensionsClient) DeleteSender(req *http.Requ var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetExtensionsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetExtensionsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetrollingupgrades.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetrollingupgrades.go index 62907e5738c..003dfc3ee08 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetrollingupgrades.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetrollingupgrades.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. 
+// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -105,20 +94,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) CancelSender(req *http var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesCancelFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetRollingUpgradesCancelFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -273,20 +249,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) StartExtensionUpgradeS var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -364,20 +327,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradeSender(r var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesets.go index fbdee08c7e9..f7e9469d67b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesets.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -131,30 +120,7 @@ func (client VirtualMachineScaleSetsClient) CreateOrUpdateSender(req *http.Reque var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (vmss VirtualMachineScaleSet, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vmss.Response.Response, err = future.GetResult(sender) - if vmss.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vmss.Response.Response.StatusCode != http.StatusNoContent { - vmss, err = client.CreateOrUpdateResponder(vmss.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsCreateOrUpdateFuture", "Result", vmss.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -239,20 +205,7 @@ func (client VirtualMachineScaleSetsClient) DeallocateSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsDeallocateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsDeallocateFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -329,20 +282,7 @@ func (client VirtualMachineScaleSetsClient) DeleteSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = 
azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -428,20 +368,7 @@ func (client VirtualMachineScaleSetsClient) DeleteInstancesSender(req *http.Requ var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsDeleteInstancesFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsDeleteInstancesFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1225,20 +1152,7 @@ func (client VirtualMachineScaleSetsClient) PerformMaintenanceSender(req *http.R var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsPerformMaintenanceFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsPerformMaintenanceFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1322,20 +1236,7 @@ func (client VirtualMachineScaleSetsClient) PowerOffSender(req *http.Request) (f var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsPowerOffFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsPowerOffFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1419,20 +1320,7 @@ func (client VirtualMachineScaleSetsClient) RedeploySender(req *http.Request) (f var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsRedeployFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsRedeployFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1516,20 +1404,7 @@ func (client VirtualMachineScaleSetsClient) ReimageSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = 
future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsReimageFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsReimageFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1613,20 +1488,7 @@ func (client VirtualMachineScaleSetsClient) ReimageAllSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsReimageAllFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsReimageAllFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1709,20 +1571,7 @@ func (client VirtualMachineScaleSetsClient) RestartSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsRestartFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsRestartFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1805,20 +1654,7 @@ func (client VirtualMachineScaleSetsClient) StartSender(req *http.Request) (futu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsStartFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsStartFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1898,30 +1734,7 @@ func (client VirtualMachineScaleSetsClient) UpdateSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (vmss VirtualMachineScaleSet, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vmss.Response.Response, err = future.GetResult(sender) - if vmss.Response.Response == 
nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vmss.Response.Response.StatusCode != http.StatusNoContent { - vmss, err = client.UpdateResponder(vmss.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsUpdateFuture", "Result", vmss.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -2008,20 +1821,7 @@ func (client VirtualMachineScaleSetsClient) UpdateInstancesSender(req *http.Requ var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsUpdateInstancesFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsUpdateInstancesFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetvms.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetvms.go index 9e51babca1a..fb399e30c09 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetvms.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetvms.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
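
The hunks above only change how the generated futures populate their `Result` field; nothing changes for callers. As a point of reference, here is a minimal sketch of how one of these Sender-returned futures is typically driven. It is not part of the vendored diff: the resource group, scale set name, and environment-based auth are placeholder assumptions, and the client comes from the 2018-10-01 compute package shown above.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	subscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID")

	// Placeholder auth setup; any autorest authorizer works here.
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	client := compute.NewVirtualMachineScaleSetsClient(subscriptionID)
	client.Authorizer = authorizer

	ctx := context.Background()

	// Delete returns a VirtualMachineScaleSetsDeleteFuture via DeleteSender.
	future, err := client.Delete(ctx, "my-resource-group", "my-scale-set")
	if err != nil {
		log.Fatal(err)
	}

	// Block until the long-running operation completes.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		log.Fatal(err)
	}

	// Result now dispatches to the generated result method set by DeleteSender
	// (and by UnmarshalJSON when a persisted future is rehydrated) rather than
	// to a per-call inline closure; the call site is unchanged.
	ar, err := future.Result(client)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("delete returned HTTP status:", ar.StatusCode)
}
```
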
@@ -109,20 +98,7 @@ func (client VirtualMachineScaleSetVMsClient) DeallocateSender(req *http.Request var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsDeallocateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsDeallocateFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -201,20 +177,7 @@ func (client VirtualMachineScaleSetVMsClient) DeleteSender(req *http.Request) (f var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -580,20 +543,7 @@ func (client VirtualMachineScaleSetVMsClient) PerformMaintenanceSender(req *http var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsPerformMaintenanceFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsPerformMaintenanceFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -673,20 +623,7 @@ func (client VirtualMachineScaleSetVMsClient) PowerOffSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsPowerOffFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsPowerOffFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -766,20 +703,7 @@ func (client VirtualMachineScaleSetVMsClient) RedeploySender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRedeployFuture", "Result", future.Response(), "Polling failure") - return - 
} - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsRedeployFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -864,20 +788,7 @@ func (client VirtualMachineScaleSetVMsClient) ReimageSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsReimageFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsReimageFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -957,20 +868,7 @@ func (client VirtualMachineScaleSetVMsClient) ReimageAllSender(req *http.Request var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsReimageAllFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsReimageAllFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1049,20 +947,7 @@ func (client VirtualMachineScaleSetVMsClient) RestartSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRestartFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsRestartFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1150,30 +1035,7 @@ func (client VirtualMachineScaleSetVMsClient) RunCommandSender(req *http.Request var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMsClient) (rcr RunCommandResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRunCommandFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsRunCommandFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - rcr.Response.Response, err = future.GetResult(sender) - if rcr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRunCommandFuture", "Result", nil, "received nil response and error") - } - if err == nil && 
rcr.Response.Response.StatusCode != http.StatusNoContent { - rcr, err = client.RunCommandResponder(rcr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRunCommandFuture", "Result", rcr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1253,20 +1115,7 @@ func (client VirtualMachineScaleSetVMsClient) StartSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsStartFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsStartFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1373,30 +1222,7 @@ func (client VirtualMachineScaleSetVMsClient) UpdateSender(req *http.Request) (f var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMsClient) (vmssv VirtualMachineScaleSetVM, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vmssv.Response.Response, err = future.GetResult(sender) - if vmssv.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vmssv.Response.Response.StatusCode != http.StatusNoContent { - vmssv, err = client.UpdateResponder(vmssv.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsUpdateFuture", "Result", vmssv.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinesizes.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinesizes.go index c06436e05eb..449ce499a35 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinesizes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinesizes.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/CHANGELOG.md index 3e1e152217f..7f530d83dac 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/CHANGELOG.md @@ -1,5 +1,106 @@ Generated from https://github.com/Azure/azure-rest-api-specs/tree/3c764635e7d442b3e74caf593029fcd440b3ef82//specification/compute/resource-manager/readme.md tag: `package-2020-06-01` -Code generator @microsoft.azure/autorest.go@2.1.175 +Code generator @microsoft.azure/autorest.go@2.1.178 +### New Funcs + +1. *ContainerServicesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ContainerServicesDeleteFuture.UnmarshalJSON([]byte) error +1. *DedicatedHostsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *DedicatedHostsDeleteFuture.UnmarshalJSON([]byte) error +1. *DedicatedHostsUpdateFuture.UnmarshalJSON([]byte) error +1. *DiskAccessesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *DiskAccessesDeleteFuture.UnmarshalJSON([]byte) error +1. *DiskAccessesUpdateFuture.UnmarshalJSON([]byte) error +1. *DiskEncryptionSetsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *DiskEncryptionSetsDeleteFuture.UnmarshalJSON([]byte) error +1. *DiskEncryptionSetsUpdateFuture.UnmarshalJSON([]byte) error +1. *DisksCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *DisksDeleteFuture.UnmarshalJSON([]byte) error +1. *DisksGrantAccessFuture.UnmarshalJSON([]byte) error +1. *DisksRevokeAccessFuture.UnmarshalJSON([]byte) error +1. *DisksUpdateFuture.UnmarshalJSON([]byte) error +1. *GalleriesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *GalleriesDeleteFuture.UnmarshalJSON([]byte) error +1. *GalleriesUpdateFuture.UnmarshalJSON([]byte) error +1. *GalleryApplicationVersionsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *GalleryApplicationVersionsDeleteFuture.UnmarshalJSON([]byte) error +1. *GalleryApplicationVersionsUpdateFuture.UnmarshalJSON([]byte) error +1. *GalleryApplicationsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *GalleryApplicationsDeleteFuture.UnmarshalJSON([]byte) error +1. *GalleryApplicationsUpdateFuture.UnmarshalJSON([]byte) error +1. *GalleryImageVersionsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *GalleryImageVersionsDeleteFuture.UnmarshalJSON([]byte) error +1. *GalleryImageVersionsUpdateFuture.UnmarshalJSON([]byte) error +1. *GalleryImagesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *GalleryImagesDeleteFuture.UnmarshalJSON([]byte) error +1. *GalleryImagesUpdateFuture.UnmarshalJSON([]byte) error +1. *ImagesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. 
*ImagesDeleteFuture.UnmarshalJSON([]byte) error +1. *ImagesUpdateFuture.UnmarshalJSON([]byte) error +1. *LogAnalyticsExportRequestRateByIntervalFuture.UnmarshalJSON([]byte) error +1. *LogAnalyticsExportThrottledRequestsFuture.UnmarshalJSON([]byte) error +1. *SnapshotsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *SnapshotsDeleteFuture.UnmarshalJSON([]byte) error +1. *SnapshotsGrantAccessFuture.UnmarshalJSON([]byte) error +1. *SnapshotsRevokeAccessFuture.UnmarshalJSON([]byte) error +1. *SnapshotsUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineExtensionsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineExtensionsDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineExtensionsUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineRunCommandsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineRunCommandsDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineRunCommandsUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetExtensionsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetExtensionsDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetExtensionsUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetRollingUpgradesCancelFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMExtensionsDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMExtensionsUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMRunCommandsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMRunCommandsDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMRunCommandsUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMsDeallocateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMsDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMsPerformMaintenanceFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMsPowerOffFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMsRedeployFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMsReimageAllFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMsReimageFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMsRestartFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMsRunCommandFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMsStartFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetVMsUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetsDeallocateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetsDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetsDeleteInstancesFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetsPerformMaintenanceFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetsPowerOffFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetsRedeployFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetsReimageAllFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetsReimageFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetsRestartFuture.UnmarshalJSON([]byte) error +1. 
*VirtualMachineScaleSetsSetOrchestrationServiceStateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetsStartFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetsUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachineScaleSetsUpdateInstancesFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesAssessPatchesFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesCaptureFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesConvertToManagedDisksFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesDeallocateFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesPerformMaintenanceFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesPowerOffFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesReapplyFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesRedeployFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesReimageFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesRestartFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesRunCommandFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesStartFuture.UnmarshalJSON([]byte) error +1. *VirtualMachinesUpdateFuture.UnmarshalJSON([]byte) error diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/_meta.json new file mode 100644 index 00000000000..8e7b5e0d172 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/_meta.json @@ -0,0 +1,10 @@ +{ + "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", + "readme": "/_/azure-rest-api-specs/specification/compute/resource-manager/readme.md", + "tag": "package-2020-06-01", + "use": "@microsoft.azure/autorest.go@2.1.178", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.178 --go --verbose --go-sdk-folder=/_/azure-sdk-for-go --tag=package-2020-06-01 --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/compute/resource-manager/readme.md", + "additional_properties": { + "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION" + } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/availabilitysets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/availabilitysets.go index 562e01215c2..a4827a680aa 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/availabilitysets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/availabilitysets.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/client.go index d9d0430190e..1812f27feb9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/client.go @@ -3,19 +3,8 @@ // Compute Client package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/containerservices.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/containerservices.go index 23e5e17e7de..d27f363dab6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/containerservices.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/containerservices.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -140,30 +129,7 @@ func (client ContainerServicesClient) CreateOrUpdateSender(req *http.Request) (f var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ContainerServicesClient) (cs ContainerService, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.ContainerServicesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.ContainerServicesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - cs.Response.Response, err = future.GetResult(sender) - if cs.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.ContainerServicesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && cs.Response.Response.StatusCode != http.StatusNoContent { - cs, err = client.CreateOrUpdateResponder(cs.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.ContainerServicesCreateOrUpdateFuture", "Result", cs.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -244,20 +210,7 @@ func (client ContainerServicesClient) DeleteSender(req *http.Request) (future Co var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ContainerServicesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.ContainerServicesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.ContainerServicesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/dedicatedhostgroups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/dedicatedhostgroups.go index 551f401fb79..b4b7af06474 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/dedicatedhostgroups.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/dedicatedhostgroups.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. 
// Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/dedicatedhosts.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/dedicatedhosts.go index acda141c387..fb86c5e9848 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/dedicatedhosts.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/dedicatedhosts.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -119,30 +108,7 @@ func (client DedicatedHostsClient) CreateOrUpdateSender(req *http.Request) (futu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DedicatedHostsClient) (dh DedicatedHost, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DedicatedHostsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.DedicatedHostsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - dh.Response.Response, err = future.GetResult(sender) - if dh.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.DedicatedHostsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && dh.Response.Response.StatusCode != http.StatusNoContent { - dh, err = client.CreateOrUpdateResponder(dh.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DedicatedHostsCreateOrUpdateFuture", "Result", dh.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -222,20 +188,7 @@ func (client DedicatedHostsClient) DeleteSender(req *http.Request) (future Dedic var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DedicatedHostsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DedicatedHostsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.DedicatedHostsDeleteFuture") - return - } - 
ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -518,30 +471,7 @@ func (client DedicatedHostsClient) UpdateSender(req *http.Request) (future Dedic var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DedicatedHostsClient) (dh DedicatedHost, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DedicatedHostsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.DedicatedHostsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - dh.Response.Response, err = future.GetResult(sender) - if dh.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.DedicatedHostsUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && dh.Response.Response.StatusCode != http.StatusNoContent { - dh, err = client.UpdateResponder(dh.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DedicatedHostsUpdateFuture", "Result", dh.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/diskaccesses.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/diskaccesses.go index 8641e0e3807..b758073e68e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/diskaccesses.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/diskaccesses.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -108,30 +97,7 @@ func (client DiskAccessesClient) CreateOrUpdateSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DiskAccessesClient) (da DiskAccess, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DiskAccessesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.DiskAccessesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - da.Response.Response, err = future.GetResult(sender) - if da.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.DiskAccessesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && da.Response.Response.StatusCode != http.StatusNoContent { - da, err = client.CreateOrUpdateResponder(da.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DiskAccessesCreateOrUpdateFuture", "Result", da.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -211,20 +177,7 @@ func (client DiskAccessesClient) DeleteSender(req *http.Request) (future DiskAcc var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DiskAccessesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DiskAccessesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.DiskAccessesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -691,30 +644,7 @@ func (client DiskAccessesClient) UpdateSender(req *http.Request) (future DiskAcc var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DiskAccessesClient) (da DiskAccess, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DiskAccessesUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.DiskAccessesUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - da.Response.Response, err = future.GetResult(sender) - if da.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.DiskAccessesUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && da.Response.Response.StatusCode != http.StatusNoContent { - da, err = client.UpdateResponder(da.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DiskAccessesUpdateFuture", "Result", da.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/diskencryptionsets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/diskencryptionsets.go index 018e5b377ae..1871110267a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/diskencryptionsets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/diskencryptionsets.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -122,30 +111,7 @@ func (client DiskEncryptionSetsClient) CreateOrUpdateSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DiskEncryptionSetsClient) (desVar DiskEncryptionSet, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.DiskEncryptionSetsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - desVar.Response.Response, err = future.GetResult(sender) - if desVar.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && desVar.Response.Response.StatusCode != http.StatusNoContent { - desVar, err = client.CreateOrUpdateResponder(desVar.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsCreateOrUpdateFuture", "Result", desVar.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -225,20 +191,7 @@ func (client DiskEncryptionSetsClient) DeleteSender(req *http.Request) (future D var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DiskEncryptionSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.DiskEncryptionSetsDeleteFuture") - return - } - ar.Response = 
future.Response() - return - } + future.Result = future.result return } @@ -628,30 +581,7 @@ func (client DiskEncryptionSetsClient) UpdateSender(req *http.Request) (future D var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DiskEncryptionSetsClient) (desVar DiskEncryptionSet, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.DiskEncryptionSetsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - desVar.Response.Response, err = future.GetResult(sender) - if desVar.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && desVar.Response.Response.StatusCode != http.StatusNoContent { - desVar, err = client.UpdateResponder(desVar.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsUpdateFuture", "Result", desVar.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/disks.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/disks.go index ee38bdb51cb..c9b8564bc1b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/disks.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/disks.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -126,30 +115,7 @@ func (client DisksClient) CreateOrUpdateSender(req *http.Request) (future DisksC var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DisksClient) (d Disk, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.DisksCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - d.Response.Response, err = future.GetResult(sender) - if d.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && d.Response.Response.StatusCode != http.StatusNoContent { - d, err = client.CreateOrUpdateResponder(d.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", d.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -229,20 +195,7 @@ func (client DisksClient) DeleteSender(req *http.Request) (future DisksDeleteFut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DisksClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.DisksDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -408,30 +361,7 @@ func (client DisksClient) GrantAccessSender(req *http.Request) (future DisksGran var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DisksClient) (au AccessURI, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.DisksGrantAccessFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - au.Response.Response, err = future.GetResult(sender) - if au.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", nil, "received nil response and error") - } - if err == nil && au.Response.Response.StatusCode != http.StatusNoContent { - au, err = client.GrantAccessResponder(au.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", au.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -740,20 +670,7 @@ func (client DisksClient) RevokeAccessSender(req *http.Request) (future DisksRev var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) 
future.FutureAPI = &azf - future.Result = func(client DisksClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksRevokeAccessFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.DisksRevokeAccessFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -835,30 +752,7 @@ func (client DisksClient) UpdateSender(req *http.Request) (future DisksUpdateFut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DisksClient) (d Disk, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.DisksUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - d.Response.Response, err = future.GetResult(sender) - if d.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && d.Response.Response.StatusCode != http.StatusNoContent { - d, err = client.UpdateResponder(d.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", d.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/enums.go index 34ca493fdb5..a8bbac13f8b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/enums.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/enums.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
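Every *Sender hunk in this vendored diff makes the same substitution: the anonymous closure that used to be assigned to future.Result is replaced by a reference to a typed result method (added in models.go further down), so the polling logic itself does not change. For orientation only, here is a minimal, hypothetical consumer-side sketch of how one of these futures is normally driven; the subscription ID, resource group, disk name and region are placeholders and nothing here is taken from this PR. The point is simply that the WaitForCompletionRef-then-Result path looks the same to callers before and after the refactor.

	// Hypothetical consumer-side sketch; not part of this PR.
	package main

	import (
		"context"
		"fmt"

		"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute"
		"github.com/Azure/go-autorest/autorest/azure/auth"
		"github.com/Azure/go-autorest/autorest/to"
	)

	func main() {
		authorizer, err := auth.NewAuthorizerFromEnvironment()
		if err != nil {
			panic(err)
		}

		client := compute.NewDisksClient("00000000-0000-0000-0000-000000000000") // placeholder subscription ID
		client.Authorizer = authorizer

		// Kick off the long-running operation; the returned future carries the poll state.
		future, err := client.CreateOrUpdate(context.Background(), "example-rg", "example-disk", compute.Disk{
			Location: to.StringPtr("eastus"),
			DiskProperties: &compute.DiskProperties{
				CreationData: &compute.CreationData{CreateOption: compute.Empty},
				DiskSizeGB:   to.Int32Ptr(128),
			},
		})
		if err != nil {
			panic(err)
		}

		// Poll until done, then ask the future for the typed result. Whether Result
		// is the old inline closure or the new generated result method, this call
		// path is identical for callers.
		if err := future.WaitForCompletionRef(context.Background(), client.Client); err != nil {
			panic(err)
		}
		disk, err := future.Result(client)
		if err != nil {
			panic(err)
		}
		fmt.Println(to.String(disk.ID))
	}
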
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/galleries.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/galleries.go index bdffcebd56c..75873554375 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/galleries.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/galleries.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -107,30 +96,7 @@ func (client GalleriesClient) CreateOrUpdateSender(req *http.Request) (future Ga var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GalleriesClient) (g Gallery, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.GalleriesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - g.Response.Response, err = future.GetResult(sender) - if g.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && g.Response.Response.StatusCode != http.StatusNoContent { - g, err = client.CreateOrUpdateResponder(g.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", g.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -208,20 +174,7 @@ func (client GalleriesClient) DeleteSender(req *http.Request) (future GalleriesD var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GalleriesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleriesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.GalleriesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -607,30 +560,7 @@ func (client GalleriesClient) UpdateSender(req 
*http.Request) (future GalleriesU var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GalleriesClient) (g Gallery, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleriesUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.GalleriesUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - g.Response.Response, err = future.GetResult(sender) - if g.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.GalleriesUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && g.Response.Response.StatusCode != http.StatusNoContent { - g, err = client.UpdateResponder(g.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleriesUpdateFuture", "Result", g.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/galleryapplications.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/galleryapplications.go index a1a311d3c5c..9f50ac068e7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/galleryapplications.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/galleryapplications.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -112,30 +101,7 @@ func (client GalleryApplicationsClient) CreateOrUpdateSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GalleryApplicationsClient) (ga GalleryApplication, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ga.Response.Response, err = future.GetResult(sender) - if ga.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && ga.Response.Response.StatusCode != http.StatusNoContent { - ga, err = client.CreateOrUpdateResponder(ga.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsCreateOrUpdateFuture", "Result", ga.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -216,20 +182,7 @@ func (client GalleryApplicationsClient) DeleteSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GalleryApplicationsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -512,30 +465,7 @@ func (client GalleryApplicationsClient) UpdateSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GalleryApplicationsClient) (ga GalleryApplication, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ga.Response.Response, err = future.GetResult(sender) - if ga.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && ga.Response.Response.StatusCode != http.StatusNoContent { - ga, err = client.UpdateResponder(ga.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsUpdateFuture", "Result", ga.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return 
} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/galleryapplicationversions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/galleryapplicationversions.go index 4b23ac87e4e..122818fed01 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/galleryapplicationversions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/galleryapplicationversions.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -129,30 +118,7 @@ func (client GalleryApplicationVersionsClient) CreateOrUpdateSender(req *http.Re var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GalleryApplicationVersionsClient) (gav GalleryApplicationVersion, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationVersionsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - gav.Response.Response, err = future.GetResult(sender) - if gav.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && gav.Response.Response.StatusCode != http.StatusNoContent { - gav, err = client.CreateOrUpdateResponder(gav.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsCreateOrUpdateFuture", "Result", gav.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -235,20 +201,7 @@ func (client GalleryApplicationVersionsClient) DeleteSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GalleryApplicationVersionsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = 
azure.NewAsyncOpIncompleteError("compute.GalleryApplicationVersionsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -541,30 +494,7 @@ func (client GalleryApplicationVersionsClient) UpdateSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GalleryApplicationVersionsClient) (gav GalleryApplicationVersion, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationVersionsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - gav.Response.Response, err = future.GetResult(sender) - if gav.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && gav.Response.Response.StatusCode != http.StatusNoContent { - gav, err = client.UpdateResponder(gav.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsUpdateFuture", "Result", gav.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/galleryimages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/galleryimages.go index 7834b6f66ed..98858ff6c7e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/galleryimages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/galleryimages.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -123,30 +112,7 @@ func (client GalleryImagesClient) CreateOrUpdateSender(req *http.Request) (futur var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GalleryImagesClient) (gi GalleryImage, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImagesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.GalleryImagesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - gi.Response.Response, err = future.GetResult(sender) - if gi.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImagesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && gi.Response.Response.StatusCode != http.StatusNoContent { - gi, err = client.CreateOrUpdateResponder(gi.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImagesCreateOrUpdateFuture", "Result", gi.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -226,20 +192,7 @@ func (client GalleryImagesClient) DeleteSender(req *http.Request) (future Galler var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GalleryImagesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImagesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.GalleryImagesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -519,30 +472,7 @@ func (client GalleryImagesClient) UpdateSender(req *http.Request) (future Galler var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GalleryImagesClient) (gi GalleryImage, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImagesUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.GalleryImagesUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - gi.Response.Response, err = future.GetResult(sender) - if gi.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImagesUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && gi.Response.Response.StatusCode != http.StatusNoContent { - gi, err = client.UpdateResponder(gi.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImagesUpdateFuture", "Result", gi.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/galleryimageversions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/galleryimageversions.go index 4eb62ef2057..64ab355fd85 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/galleryimageversions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/galleryimageversions.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -121,30 +110,7 @@ func (client GalleryImageVersionsClient) CreateOrUpdateSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GalleryImageVersionsClient) (giv GalleryImageVersion, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.GalleryImageVersionsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - giv.Response.Response, err = future.GetResult(sender) - if giv.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && giv.Response.Response.StatusCode != http.StatusNoContent { - giv, err = client.CreateOrUpdateResponder(giv.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsCreateOrUpdateFuture", "Result", giv.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -226,20 +192,7 @@ func (client GalleryImageVersionsClient) DeleteSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GalleryImageVersionsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.GalleryImageVersionsDeleteFuture") - return - } - 
ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -530,30 +483,7 @@ func (client GalleryImageVersionsClient) UpdateSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GalleryImageVersionsClient) (giv GalleryImageVersion, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.GalleryImageVersionsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - giv.Response.Response, err = future.GetResult(sender) - if giv.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && giv.Response.Response.StatusCode != http.StatusNoContent { - giv, err = client.UpdateResponder(giv.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsUpdateFuture", "Result", giv.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/images.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/images.go index 0bbbc92e123..1048545e11a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/images.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/images.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
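The remaining compute files repeat the same Sender-side substitution, and the models.go hunks that begin shortly below add the matching UnmarshalJSON and result methods for each future type. The apparent motivation is that a future rehydrated from JSON regains a usable Result function, which an anonymous closure assigned only inside the Sender could not provide. The following self-contained sketch uses invented names (toyFuture, a pollingURI payload) rather than the SDK's real types, purely to illustrate that wiring.

	// Self-contained sketch with invented names; it mirrors, but is not,
	// the generated SDK code.
	package main

	import (
		"encoding/json"
		"fmt"
	)

	type toyFuture struct {
		PollingURI string `json:"pollingURI"`

		// Result mirrors the futures' exported Result field. When it was only
		// ever assigned an anonymous closure inside the *Sender method, it came
		// back nil after a JSON round trip.
		Result func() (string, error) `json:"-"`
	}

	// result plays the role of the generated (*XxxFuture) result methods.
	func (f *toyFuture) result() (string, error) {
		return "operation at " + f.PollingURI + " completed", nil
	}

	// UnmarshalJSON restores both the persisted state and the Result hook,
	// mirroring the generated UnmarshalJSON methods added in models.go.
	func (f *toyFuture) UnmarshalJSON(body []byte) error {
		type plain toyFuture // same fields, no methods, so no recursion
		var p plain
		if err := json.Unmarshal(body, &p); err != nil {
			return err
		}
		f.PollingURI = p.PollingURI
		f.Result = f.result
		return nil
	}

	func main() {
		// A poll state persisted earlier (URL is a placeholder).
		stored := []byte(`{"pollingURI":"https://example.invalid/operations/42"}`)

		var restored toyFuture
		if err := json.Unmarshal(stored, &restored); err != nil {
			panic(err)
		}

		// With closure-only wiring this would be a call through a nil function;
		// after rehydration via UnmarshalJSON it works.
		msg, _ := restored.Result()
		fmt.Println(msg)
	}
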
@@ -106,30 +95,7 @@ func (client ImagesClient) CreateOrUpdateSender(req *http.Request) (future Image var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ImagesClient) (i Image, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.ImagesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.ImagesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - i.Response.Response, err = future.GetResult(sender) - if i.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.ImagesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && i.Response.Response.StatusCode != http.StatusNoContent { - i, err = client.CreateOrUpdateResponder(i.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.ImagesCreateOrUpdateFuture", "Result", i.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -207,20 +173,7 @@ func (client ImagesClient) DeleteSender(req *http.Request) (future ImagesDeleteF var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ImagesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.ImagesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.ImagesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -610,30 +563,7 @@ func (client ImagesClient) UpdateSender(req *http.Request) (future ImagesUpdateF var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ImagesClient) (i Image, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.ImagesUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.ImagesUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - i.Response.Response, err = future.GetResult(sender) - if i.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.ImagesUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && i.Response.Response.StatusCode != http.StatusNoContent { - i, err = client.UpdateResponder(i.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.ImagesUpdateFuture", "Result", i.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/loganalytics.go 
b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/loganalytics.go index fe5c76d23c5..d96a293e4a2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/loganalytics.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/loganalytics.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -112,30 +101,7 @@ func (client LogAnalyticsClient) ExportRequestRateByIntervalSender(req *http.Req var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client LogAnalyticsClient) (laor LogAnalyticsOperationResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportRequestRateByIntervalFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.LogAnalyticsExportRequestRateByIntervalFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - laor.Response.Response, err = future.GetResult(sender) - if laor.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportRequestRateByIntervalFuture", "Result", nil, "received nil response and error") - } - if err == nil && laor.Response.Response.StatusCode != http.StatusNoContent { - laor, err = client.ExportRequestRateByIntervalResponder(laor.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportRequestRateByIntervalFuture", "Result", laor.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -221,30 +187,7 @@ func (client LogAnalyticsClient) ExportThrottledRequestsSender(req *http.Request var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client LogAnalyticsClient) (laor LogAnalyticsOperationResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportThrottledRequestsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.LogAnalyticsExportThrottledRequestsFuture") - return - } - sender := autorest.DecorateSender(client, 
autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - laor.Response.Response, err = future.GetResult(sender) - if laor.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportThrottledRequestsFuture", "Result", nil, "received nil response and error") - } - if err == nil && laor.Response.Response.StatusCode != http.StatusNoContent { - laor, err = client.ExportThrottledRequestsResponder(laor.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportThrottledRequestsFuture", "Result", laor.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/models.go index 0ab59eddd3c..ad0b3ede86a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/models.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -946,6 +935,39 @@ type ContainerServicesCreateOrUpdateFuture struct { Result func(ContainerServicesClient) (ContainerService, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ContainerServicesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ContainerServicesCreateOrUpdateFuture.Result. 
+func (future *ContainerServicesCreateOrUpdateFuture) result(client ContainerServicesClient) (cs ContainerService, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.ContainerServicesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if cs.Response.Response, err = future.GetResult(sender); err == nil && cs.Response.Response.StatusCode != http.StatusNoContent { + cs, err = client.CreateOrUpdateResponder(cs.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesCreateOrUpdateFuture", "Result", cs.Response.Response, "Failure responding to request") + } + } + return +} + // ContainerServicesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type ContainerServicesDeleteFuture struct { @@ -955,6 +977,33 @@ type ContainerServicesDeleteFuture struct { Result func(ContainerServicesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ContainerServicesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ContainerServicesDeleteFuture.Result. +func (future *ContainerServicesDeleteFuture) result(client ContainerServicesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.ContainerServicesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ContainerServiceServicePrincipalProfile information about a service principal identity for the cluster // to use for manipulating Azure APIs. type ContainerServiceServicePrincipalProfile struct { @@ -1877,6 +1926,39 @@ type DedicatedHostsCreateOrUpdateFuture struct { Result func(DedicatedHostsClient) (DedicatedHost, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DedicatedHostsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DedicatedHostsCreateOrUpdateFuture.Result. 
+func (future *DedicatedHostsCreateOrUpdateFuture) result(client DedicatedHostsClient) (dh DedicatedHost, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DedicatedHostsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DedicatedHostsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if dh.Response.Response, err = future.GetResult(sender); err == nil && dh.Response.Response.StatusCode != http.StatusNoContent { + dh, err = client.CreateOrUpdateResponder(dh.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DedicatedHostsCreateOrUpdateFuture", "Result", dh.Response.Response, "Failure responding to request") + } + } + return +} + // DedicatedHostsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type DedicatedHostsDeleteFuture struct { @@ -1886,6 +1968,33 @@ type DedicatedHostsDeleteFuture struct { Result func(DedicatedHostsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DedicatedHostsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DedicatedHostsDeleteFuture.Result. +func (future *DedicatedHostsDeleteFuture) result(client DedicatedHostsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DedicatedHostsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DedicatedHostsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // DedicatedHostsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type DedicatedHostsUpdateFuture struct { @@ -1895,6 +2004,39 @@ type DedicatedHostsUpdateFuture struct { Result func(DedicatedHostsClient) (DedicatedHost, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DedicatedHostsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DedicatedHostsUpdateFuture.Result. 
+func (future *DedicatedHostsUpdateFuture) result(client DedicatedHostsClient) (dh DedicatedHost, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DedicatedHostsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DedicatedHostsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if dh.Response.Response, err = future.GetResult(sender); err == nil && dh.Response.Response.StatusCode != http.StatusNoContent { + dh, err = client.UpdateResponder(dh.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DedicatedHostsUpdateFuture", "Result", dh.Response.Response, "Failure responding to request") + } + } + return +} + // DedicatedHostUpdate specifies information about the dedicated host. Only tags, autoReplaceOnFailure and // licenseType may be updated. type DedicatedHostUpdate struct { @@ -2234,6 +2376,39 @@ type DiskAccessesCreateOrUpdateFuture struct { Result func(DiskAccessesClient) (DiskAccess, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DiskAccessesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DiskAccessesCreateOrUpdateFuture.Result. +func (future *DiskAccessesCreateOrUpdateFuture) result(client DiskAccessesClient) (da DiskAccess, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskAccessesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DiskAccessesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if da.Response.Response, err = future.GetResult(sender); err == nil && da.Response.Response.StatusCode != http.StatusNoContent { + da, err = client.CreateOrUpdateResponder(da.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskAccessesCreateOrUpdateFuture", "Result", da.Response.Response, "Failure responding to request") + } + } + return +} + // DiskAccessesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type DiskAccessesDeleteFuture struct { @@ -2243,6 +2418,33 @@ type DiskAccessesDeleteFuture struct { Result func(DiskAccessesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DiskAccessesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DiskAccessesDeleteFuture.Result. 
+func (future *DiskAccessesDeleteFuture) result(client DiskAccessesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskAccessesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DiskAccessesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // DiskAccessesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type DiskAccessesUpdateFuture struct { @@ -2252,6 +2454,39 @@ type DiskAccessesUpdateFuture struct { Result func(DiskAccessesClient) (DiskAccess, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DiskAccessesUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DiskAccessesUpdateFuture.Result. +func (future *DiskAccessesUpdateFuture) result(client DiskAccessesClient) (da DiskAccess, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskAccessesUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DiskAccessesUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if da.Response.Response, err = future.GetResult(sender); err == nil && da.Response.Response.StatusCode != http.StatusNoContent { + da, err = client.UpdateResponder(da.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskAccessesUpdateFuture", "Result", da.Response.Response, "Failure responding to request") + } + } + return +} + // DiskAccessList the List disk access operation response. type DiskAccessList struct { autorest.Response `json:"-"` @@ -2725,6 +2960,39 @@ type DiskEncryptionSetsCreateOrUpdateFuture struct { Result func(DiskEncryptionSetsClient) (DiskEncryptionSet, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DiskEncryptionSetsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DiskEncryptionSetsCreateOrUpdateFuture.Result. 
+func (future *DiskEncryptionSetsCreateOrUpdateFuture) result(client DiskEncryptionSetsClient) (desVar DiskEncryptionSet, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DiskEncryptionSetsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if desVar.Response.Response, err = future.GetResult(sender); err == nil && desVar.Response.Response.StatusCode != http.StatusNoContent { + desVar, err = client.CreateOrUpdateResponder(desVar.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsCreateOrUpdateFuture", "Result", desVar.Response.Response, "Failure responding to request") + } + } + return +} + // DiskEncryptionSetsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type DiskEncryptionSetsDeleteFuture struct { @@ -2734,6 +3002,33 @@ type DiskEncryptionSetsDeleteFuture struct { Result func(DiskEncryptionSetsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DiskEncryptionSetsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DiskEncryptionSetsDeleteFuture.Result. +func (future *DiskEncryptionSetsDeleteFuture) result(client DiskEncryptionSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DiskEncryptionSetsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // DiskEncryptionSetsUpdateFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type DiskEncryptionSetsUpdateFuture struct { @@ -2743,6 +3038,39 @@ type DiskEncryptionSetsUpdateFuture struct { Result func(DiskEncryptionSetsClient) (DiskEncryptionSet, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DiskEncryptionSetsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DiskEncryptionSetsUpdateFuture.Result. 
+func (future *DiskEncryptionSetsUpdateFuture) result(client DiskEncryptionSetsClient) (desVar DiskEncryptionSet, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DiskEncryptionSetsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if desVar.Response.Response, err = future.GetResult(sender); err == nil && desVar.Response.Response.StatusCode != http.StatusNoContent { + desVar, err = client.UpdateResponder(desVar.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsUpdateFuture", "Result", desVar.Response.Response, "Failure responding to request") + } + } + return +} + // DiskEncryptionSettings describes a Encryption Settings for a Disk type DiskEncryptionSettings struct { // DiskEncryptionKey - Specifies the location of the disk encryption key, which is a Key Vault Secret. @@ -3083,6 +3411,39 @@ type DisksCreateOrUpdateFuture struct { Result func(DisksClient) (Disk, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DisksCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DisksCreateOrUpdateFuture.Result. +func (future *DisksCreateOrUpdateFuture) result(client DisksClient) (d Disk, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if d.Response.Response, err = future.GetResult(sender); err == nil && d.Response.Response.StatusCode != http.StatusNoContent { + d, err = client.CreateOrUpdateResponder(d.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", d.Response.Response, "Failure responding to request") + } + } + return +} + // DisksDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. type DisksDeleteFuture struct { azure.FutureAPI @@ -3091,6 +3452,33 @@ type DisksDeleteFuture struct { Result func(DisksClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DisksDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DisksDeleteFuture.Result. 
+func (future *DisksDeleteFuture) result(client DisksClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // DisksGrantAccessFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type DisksGrantAccessFuture struct { @@ -3100,6 +3488,39 @@ type DisksGrantAccessFuture struct { Result func(DisksClient) (AccessURI, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DisksGrantAccessFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DisksGrantAccessFuture.Result. +func (future *DisksGrantAccessFuture) result(client DisksClient) (au AccessURI, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksGrantAccessFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if au.Response.Response, err = future.GetResult(sender); err == nil && au.Response.Response.StatusCode != http.StatusNoContent { + au, err = client.GrantAccessResponder(au.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", au.Response.Response, "Failure responding to request") + } + } + return +} + // DiskSku the disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, or UltraSSD_LRS. type DiskSku struct { // Name - The sku name. Possible values include: 'StandardLRS', 'PremiumLRS', 'StandardSSDLRS', 'UltraSSDLRS' @@ -3126,6 +3547,33 @@ type DisksRevokeAccessFuture struct { Result func(DisksClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DisksRevokeAccessFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DisksRevokeAccessFuture.Result. +func (future *DisksRevokeAccessFuture) result(client DisksClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksRevokeAccessFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksRevokeAccessFuture") + return + } + ar.Response = future.Response() + return +} + // DisksUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. 
type DisksUpdateFuture struct { azure.FutureAPI @@ -3134,6 +3582,39 @@ type DisksUpdateFuture struct { Result func(DisksClient) (Disk, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DisksUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DisksUpdateFuture.Result. +func (future *DisksUpdateFuture) result(client DisksClient) (d Disk, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if d.Response.Response, err = future.GetResult(sender); err == nil && d.Response.Response.StatusCode != http.StatusNoContent { + d, err = client.UpdateResponder(d.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", d.Response.Response, "Failure responding to request") + } + } + return +} + // DiskUpdate disk update resource. type DiskUpdate struct { *DiskUpdateProperties `json:"properties,omitempty"` @@ -3312,6 +3793,39 @@ type GalleriesCreateOrUpdateFuture struct { Result func(GalleriesClient) (Gallery, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleriesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleriesCreateOrUpdateFuture.Result. +func (future *GalleriesCreateOrUpdateFuture) result(client GalleriesClient) (g Gallery, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleriesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if g.Response.Response, err = future.GetResult(sender); err == nil && g.Response.Response.StatusCode != http.StatusNoContent { + g, err = client.CreateOrUpdateResponder(g.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", g.Response.Response, "Failure responding to request") + } + } + return +} + // GalleriesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type GalleriesDeleteFuture struct { @@ -3321,6 +3835,33 @@ type GalleriesDeleteFuture struct { Result func(GalleriesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
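For orientation (this note and the snippet below are editorial sketches, not part of the vendored change): the regenerated futures in this file are consumed by starting the long-running operation, waiting on the embedded azure.FutureAPI, and then calling the Result callback that the generator binds to the package-private result() method. A minimal caller-side sketch, assuming a configured DisksClient; the package name, API-version import path, and disk name are placeholders.

package example

import (
	"context"

	// API version in the import path is illustrative; ARO-RP vendors a specific mgmt/<version>/compute package.
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-30/compute"
)

// createDisk starts a disks LRO, waits for it, and returns the typed result.
func createDisk(ctx context.Context, client compute.DisksClient, resourceGroup string, disk compute.Disk) (compute.Disk, error) {
	// Start the LRO; the returned future already has Result bound to result().
	future, err := client.CreateOrUpdate(ctx, resourceGroup, "example-disk", disk) // placeholder disk name
	if err != nil {
		return compute.Disk{}, err
	}
	// Poll until the operation reaches a terminal state.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return compute.Disk{}, err
	}
	// Result delegates to the generated result() implementation shown in this diff.
	return future.Result(client)
}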
+func (future *GalleriesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleriesDeleteFuture.Result. +func (future *GalleriesDeleteFuture) result(client GalleriesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleriesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // GalleriesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type GalleriesUpdateFuture struct { @@ -3330,6 +3871,39 @@ type GalleriesUpdateFuture struct { Result func(GalleriesClient) (Gallery, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleriesUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleriesUpdateFuture.Result. +func (future *GalleriesUpdateFuture) result(client GalleriesClient) (g Gallery, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleriesUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if g.Response.Response, err = future.GetResult(sender); err == nil && g.Response.Response.StatusCode != http.StatusNoContent { + g, err = client.UpdateResponder(g.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesUpdateFuture", "Result", g.Response.Response, "Failure responding to request") + } + } + return +} + // Gallery specifies information about the Shared Image Gallery that you want to create or update. type Gallery struct { autorest.Response `json:"-"` @@ -3715,6 +4289,39 @@ type GalleryApplicationsCreateOrUpdateFuture struct { Result func(GalleryApplicationsClient) (GalleryApplication, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleryApplicationsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleryApplicationsCreateOrUpdateFuture.Result. 
+func (future *GalleryApplicationsCreateOrUpdateFuture) result(client GalleryApplicationsClient) (ga GalleryApplication, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ga.Response.Response, err = future.GetResult(sender); err == nil && ga.Response.Response.StatusCode != http.StatusNoContent { + ga, err = client.CreateOrUpdateResponder(ga.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsCreateOrUpdateFuture", "Result", ga.Response.Response, "Failure responding to request") + } + } + return +} + // GalleryApplicationsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type GalleryApplicationsDeleteFuture struct { @@ -3724,6 +4331,33 @@ type GalleryApplicationsDeleteFuture struct { Result func(GalleryApplicationsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleryApplicationsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleryApplicationsDeleteFuture.Result. +func (future *GalleryApplicationsDeleteFuture) result(client GalleryApplicationsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // GalleryApplicationsUpdateFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type GalleryApplicationsUpdateFuture struct { @@ -3733,10 +4367,43 @@ type GalleryApplicationsUpdateFuture struct { Result func(GalleryApplicationsClient) (GalleryApplication, error) } -// GalleryApplicationUpdate specifies information about the gallery Application Definition that you want to -// update. -type GalleryApplicationUpdate struct { - *GalleryApplicationProperties `json:"properties,omitempty"` +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleryApplicationsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleryApplicationsUpdateFuture.Result. 
+func (future *GalleryApplicationsUpdateFuture) result(client GalleryApplicationsClient) (ga GalleryApplication, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ga.Response.Response, err = future.GetResult(sender); err == nil && ga.Response.Response.StatusCode != http.StatusNoContent { + ga, err = client.UpdateResponder(ga.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsUpdateFuture", "Result", ga.Response.Response, "Failure responding to request") + } + } + return +} + +// GalleryApplicationUpdate specifies information about the gallery Application Definition that you want to +// update. +type GalleryApplicationUpdate struct { + *GalleryApplicationProperties `json:"properties,omitempty"` // ID - READ-ONLY; Resource Id ID *string `json:"id,omitempty"` // Name - READ-ONLY; Resource name @@ -4158,6 +4825,39 @@ type GalleryApplicationVersionsCreateOrUpdateFuture struct { Result func(GalleryApplicationVersionsClient) (GalleryApplicationVersion, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleryApplicationVersionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleryApplicationVersionsCreateOrUpdateFuture.Result. +func (future *GalleryApplicationVersionsCreateOrUpdateFuture) result(client GalleryApplicationVersionsClient) (gav GalleryApplicationVersion, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationVersionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gav.Response.Response, err = future.GetResult(sender); err == nil && gav.Response.Response.StatusCode != http.StatusNoContent { + gav, err = client.CreateOrUpdateResponder(gav.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsCreateOrUpdateFuture", "Result", gav.Response.Response, "Failure responding to request") + } + } + return +} + // GalleryApplicationVersionsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type GalleryApplicationVersionsDeleteFuture struct { @@ -4167,6 +4867,33 @@ type GalleryApplicationVersionsDeleteFuture struct { Result func(GalleryApplicationVersionsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *GalleryApplicationVersionsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleryApplicationVersionsDeleteFuture.Result. +func (future *GalleryApplicationVersionsDeleteFuture) result(client GalleryApplicationVersionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationVersionsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // GalleryApplicationVersionsUpdateFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type GalleryApplicationVersionsUpdateFuture struct { @@ -4176,6 +4903,39 @@ type GalleryApplicationVersionsUpdateFuture struct { Result func(GalleryApplicationVersionsClient) (GalleryApplicationVersion, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleryApplicationVersionsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleryApplicationVersionsUpdateFuture.Result. +func (future *GalleryApplicationVersionsUpdateFuture) result(client GalleryApplicationVersionsClient) (gav GalleryApplicationVersion, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationVersionsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gav.Response.Response, err = future.GetResult(sender); err == nil && gav.Response.Response.StatusCode != http.StatusNoContent { + gav, err = client.UpdateResponder(gav.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsUpdateFuture", "Result", gav.Response.Response, "Failure responding to request") + } + } + return +} + // GalleryApplicationVersionUpdate specifies information about the gallery Application Version that you // want to update. type GalleryApplicationVersionUpdate struct { @@ -4709,6 +5469,39 @@ type GalleryImagesCreateOrUpdateFuture struct { Result func(GalleryImagesClient) (GalleryImage, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleryImagesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleryImagesCreateOrUpdateFuture.Result. 
+func (future *GalleryImagesCreateOrUpdateFuture) result(client GalleryImagesClient) (gi GalleryImage, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryImagesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gi.Response.Response, err = future.GetResult(sender); err == nil && gi.Response.Response.StatusCode != http.StatusNoContent { + gi, err = client.CreateOrUpdateResponder(gi.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesCreateOrUpdateFuture", "Result", gi.Response.Response, "Failure responding to request") + } + } + return +} + // GalleryImagesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type GalleryImagesDeleteFuture struct { @@ -4718,6 +5511,33 @@ type GalleryImagesDeleteFuture struct { Result func(GalleryImagesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleryImagesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleryImagesDeleteFuture.Result. +func (future *GalleryImagesDeleteFuture) result(client GalleryImagesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryImagesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // GalleryImagesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type GalleryImagesUpdateFuture struct { @@ -4727,6 +5547,39 @@ type GalleryImagesUpdateFuture struct { Result func(GalleryImagesClient) (GalleryImage, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleryImagesUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleryImagesUpdateFuture.Result. 
+func (future *GalleryImagesUpdateFuture) result(client GalleryImagesClient) (gi GalleryImage, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryImagesUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gi.Response.Response, err = future.GetResult(sender); err == nil && gi.Response.Response.StatusCode != http.StatusNoContent { + gi, err = client.UpdateResponder(gi.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesUpdateFuture", "Result", gi.Response.Response, "Failure responding to request") + } + } + return +} + // GalleryImageUpdate specifies information about the gallery Image Definition that you want to update. type GalleryImageUpdate struct { *GalleryImageProperties `json:"properties,omitempty"` @@ -5140,6 +5993,39 @@ type GalleryImageVersionsCreateOrUpdateFuture struct { Result func(GalleryImageVersionsClient) (GalleryImageVersion, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleryImageVersionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleryImageVersionsCreateOrUpdateFuture.Result. +func (future *GalleryImageVersionsCreateOrUpdateFuture) result(client GalleryImageVersionsClient) (giv GalleryImageVersion, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryImageVersionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if giv.Response.Response, err = future.GetResult(sender); err == nil && giv.Response.Response.StatusCode != http.StatusNoContent { + giv, err = client.CreateOrUpdateResponder(giv.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsCreateOrUpdateFuture", "Result", giv.Response.Response, "Failure responding to request") + } + } + return +} + // GalleryImageVersionsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type GalleryImageVersionsDeleteFuture struct { @@ -5149,6 +6035,33 @@ type GalleryImageVersionsDeleteFuture struct { Result func(GalleryImageVersionsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleryImageVersionsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleryImageVersionsDeleteFuture.Result. 
+func (future *GalleryImageVersionsDeleteFuture) result(client GalleryImageVersionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryImageVersionsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // GalleryImageVersionStorageProfile this is the storage profile of a Gallery Image Version. type GalleryImageVersionStorageProfile struct { Source *GalleryArtifactVersionSource `json:"source,omitempty"` @@ -5166,6 +6079,39 @@ type GalleryImageVersionsUpdateFuture struct { Result func(GalleryImageVersionsClient) (GalleryImageVersion, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleryImageVersionsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleryImageVersionsUpdateFuture.Result. +func (future *GalleryImageVersionsUpdateFuture) result(client GalleryImageVersionsClient) (giv GalleryImageVersion, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryImageVersionsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if giv.Response.Response, err = future.GetResult(sender); err == nil && giv.Response.Response.StatusCode != http.StatusNoContent { + giv, err = client.UpdateResponder(giv.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsUpdateFuture", "Result", giv.Response.Response, "Failure responding to request") + } + } + return +} + // GalleryImageVersionUpdate specifies information about the gallery Image Version that you want to update. type GalleryImageVersionUpdate struct { *GalleryImageVersionProperties `json:"properties,omitempty"` @@ -5967,6 +6913,39 @@ type ImagesCreateOrUpdateFuture struct { Result func(ImagesClient) (Image, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ImagesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ImagesCreateOrUpdateFuture.Result. 
+func (future *ImagesCreateOrUpdateFuture) result(client ImagesClient) (i Image, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.ImagesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if i.Response.Response, err = future.GetResult(sender); err == nil && i.Response.Response.StatusCode != http.StatusNoContent { + i, err = client.CreateOrUpdateResponder(i.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesCreateOrUpdateFuture", "Result", i.Response.Response, "Failure responding to request") + } + } + return +} + // ImagesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. type ImagesDeleteFuture struct { azure.FutureAPI @@ -5975,6 +6954,33 @@ type ImagesDeleteFuture struct { Result func(ImagesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ImagesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ImagesDeleteFuture.Result. +func (future *ImagesDeleteFuture) result(client ImagesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.ImagesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ImageStorageProfile describes a storage profile. type ImageStorageProfile struct { // OsDisk - Specifies information about the operating system disk used by the virtual machine.
For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). @@ -5993,6 +6999,39 @@ type ImagesUpdateFuture struct { Result func(ImagesClient) (Image, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ImagesUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ImagesUpdateFuture.Result. +func (future *ImagesUpdateFuture) result(client ImagesClient) (i Image, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.ImagesUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if i.Response.Response, err = future.GetResult(sender); err == nil && i.Response.Response.StatusCode != http.StatusNoContent { + i, err = client.UpdateResponder(i.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesUpdateFuture", "Result", i.Response.Response, "Failure responding to request") + } + } + return +} + // ImageUpdate the source user image virtual hard disk. Only tags may be updated. type ImageUpdate struct { *ImageProperties `json:"properties,omitempty"` @@ -6324,6 +7363,39 @@ type LogAnalyticsExportRequestRateByIntervalFuture struct { Result func(LogAnalyticsClient) (LogAnalyticsOperationResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *LogAnalyticsExportRequestRateByIntervalFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for LogAnalyticsExportRequestRateByIntervalFuture.Result. 
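The custom UnmarshalJSON methods added throughout this file are what make a persisted future usable again: decoding restores the embedded azure.Future and rebinds Result to the typed result() implementation. A hedged sketch of resuming a previously marshalled ImagesCreateOrUpdateFuture follows; the helper name, import path version, and wiring are illustrative.

package example

import (
	"context"
	"encoding/json"

	// API version in the import path is illustrative.
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-30/compute"
)

// resumeImageCreate decodes a saved LRO state and finishes waiting on it.
func resumeImageCreate(ctx context.Context, client compute.ImagesClient, saved []byte) (compute.Image, error) {
	var future compute.ImagesCreateOrUpdateFuture
	// Uses the generated UnmarshalJSON: restores FutureAPI and sets future.Result = future.result.
	if err := json.Unmarshal(saved, &future); err != nil {
		return compute.Image{}, err
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return compute.Image{}, err
	}
	return future.Result(client)
}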
+func (future *LogAnalyticsExportRequestRateByIntervalFuture) result(client LogAnalyticsClient) (laor LogAnalyticsOperationResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportRequestRateByIntervalFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.LogAnalyticsExportRequestRateByIntervalFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if laor.Response.Response, err = future.GetResult(sender); err == nil && laor.Response.Response.StatusCode != http.StatusNoContent { + laor, err = client.ExportRequestRateByIntervalResponder(laor.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportRequestRateByIntervalFuture", "Result", laor.Response.Response, "Failure responding to request") + } + } + return +} + // LogAnalyticsExportThrottledRequestsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type LogAnalyticsExportThrottledRequestsFuture struct { @@ -6333,6 +7405,39 @@ type LogAnalyticsExportThrottledRequestsFuture struct { Result func(LogAnalyticsClient) (LogAnalyticsOperationResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *LogAnalyticsExportThrottledRequestsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for LogAnalyticsExportThrottledRequestsFuture.Result. +func (future *LogAnalyticsExportThrottledRequestsFuture) result(client LogAnalyticsClient) (laor LogAnalyticsOperationResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportThrottledRequestsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.LogAnalyticsExportThrottledRequestsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if laor.Response.Response, err = future.GetResult(sender); err == nil && laor.Response.Response.StatusCode != http.StatusNoContent { + laor, err = client.ExportThrottledRequestsResponder(laor.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportThrottledRequestsFuture", "Result", laor.Response.Response, "Failure responding to request") + } + } + return +} + // LogAnalyticsInputBase api input base class for LogAnalytics Api. type LogAnalyticsInputBase struct { // BlobContainerSasURI - SAS Uri of the logging blob container to which LogAnalytics Api writes output logs to. @@ -8283,6 +9388,39 @@ type SnapshotsCreateOrUpdateFuture struct { Result func(SnapshotsClient) (Snapshot, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *SnapshotsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SnapshotsCreateOrUpdateFuture.Result. +func (future *SnapshotsCreateOrUpdateFuture) result(client SnapshotsClient) (s Snapshot, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.SnapshotsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if s.Response.Response, err = future.GetResult(sender); err == nil && s.Response.Response.StatusCode != http.StatusNoContent { + s, err = client.CreateOrUpdateResponder(s.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsCreateOrUpdateFuture", "Result", s.Response.Response, "Failure responding to request") + } + } + return +} + // SnapshotsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type SnapshotsDeleteFuture struct { @@ -8292,6 +9430,33 @@ type SnapshotsDeleteFuture struct { Result func(SnapshotsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SnapshotsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SnapshotsDeleteFuture.Result. +func (future *SnapshotsDeleteFuture) result(client SnapshotsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.SnapshotsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // SnapshotsGrantAccessFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type SnapshotsGrantAccessFuture struct { @@ -8301,6 +9466,39 @@ type SnapshotsGrantAccessFuture struct { Result func(SnapshotsClient) (AccessURI, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SnapshotsGrantAccessFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SnapshotsGrantAccessFuture.Result. 
+func (future *SnapshotsGrantAccessFuture) result(client SnapshotsClient) (au AccessURI, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsGrantAccessFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.SnapshotsGrantAccessFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if au.Response.Response, err = future.GetResult(sender); err == nil && au.Response.Response.StatusCode != http.StatusNoContent { + au, err = client.GrantAccessResponder(au.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsGrantAccessFuture", "Result", au.Response.Response, "Failure responding to request") + } + } + return +} + // SnapshotSku the snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS. type SnapshotSku struct { // Name - The sku name. Possible values include: 'SnapshotStorageAccountTypesStandardLRS', 'SnapshotStorageAccountTypesPremiumLRS', 'SnapshotStorageAccountTypesStandardZRS' @@ -8327,6 +9525,33 @@ type SnapshotsRevokeAccessFuture struct { Result func(SnapshotsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SnapshotsRevokeAccessFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SnapshotsRevokeAccessFuture.Result. +func (future *SnapshotsRevokeAccessFuture) result(client SnapshotsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsRevokeAccessFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.SnapshotsRevokeAccessFuture") + return + } + ar.Response = future.Response() + return +} + // SnapshotsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type SnapshotsUpdateFuture struct { @@ -8336,6 +9561,39 @@ type SnapshotsUpdateFuture struct { Result func(SnapshotsClient) (Snapshot, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SnapshotsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SnapshotsUpdateFuture.Result. 
+func (future *SnapshotsUpdateFuture) result(client SnapshotsClient) (s Snapshot, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.SnapshotsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if s.Response.Response, err = future.GetResult(sender); err == nil && s.Response.Response.StatusCode != http.StatusNoContent { + s, err = client.UpdateResponder(s.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsUpdateFuture", "Result", s.Response.Response, "Failure responding to request") + } + } + return +} + // SnapshotUpdate snapshot update resource. type SnapshotUpdate struct { *SnapshotUpdateProperties `json:"properties,omitempty"` @@ -9491,13 +10749,73 @@ type VirtualMachineExtensionsCreateOrUpdateFuture struct { Result func(VirtualMachineExtensionsClient) (VirtualMachineExtension, error) } -// VirtualMachineExtensionsDeleteFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type VirtualMachineExtensionsDeleteFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(VirtualMachineExtensionsClient) (autorest.Response, error) +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineExtensionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineExtensionsCreateOrUpdateFuture.Result. +func (future *VirtualMachineExtensionsCreateOrUpdateFuture) result(client VirtualMachineExtensionsClient) (vme VirtualMachineExtension, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineExtensionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vme.Response.Response, err = future.GetResult(sender); err == nil && vme.Response.Response.StatusCode != http.StatusNoContent { + vme, err = client.CreateOrUpdateResponder(vme.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsCreateOrUpdateFuture", "Result", vme.Response.Response, "Failure responding to request") + } + } + return +} + +// VirtualMachineExtensionsDeleteFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachineExtensionsDeleteFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. 
+ Result func(VirtualMachineExtensionsClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineExtensionsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineExtensionsDeleteFuture.Result. +func (future *VirtualMachineExtensionsDeleteFuture) result(client VirtualMachineExtensionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineExtensionsDeleteFuture") + return + } + ar.Response = future.Response() + return } // VirtualMachineExtensionsListResult the List Extension operation response @@ -9516,6 +10834,39 @@ type VirtualMachineExtensionsUpdateFuture struct { Result func(VirtualMachineExtensionsClient) (VirtualMachineExtension, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineExtensionsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineExtensionsUpdateFuture.Result. +func (future *VirtualMachineExtensionsUpdateFuture) result(client VirtualMachineExtensionsClient) (vme VirtualMachineExtension, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineExtensionsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vme.Response.Response, err = future.GetResult(sender); err == nil && vme.Response.Response.StatusCode != http.StatusNoContent { + vme, err = client.UpdateResponder(vme.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsUpdateFuture", "Result", vme.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachineExtensionUpdate describes a Virtual Machine Extension. type VirtualMachineExtensionUpdate struct { *VirtualMachineExtensionUpdateProperties `json:"properties,omitempty"` @@ -10311,6 +11662,39 @@ type VirtualMachineRunCommandsCreateOrUpdateFuture struct { Result func(VirtualMachineRunCommandsClient) (VirtualMachineRunCommand, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineRunCommandsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineRunCommandsCreateOrUpdateFuture.Result. 
+func (future *VirtualMachineRunCommandsCreateOrUpdateFuture) result(client VirtualMachineRunCommandsClient) (vmrc VirtualMachineRunCommand, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineRunCommandsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vmrc.Response.Response, err = future.GetResult(sender); err == nil && vmrc.Response.Response.StatusCode != http.StatusNoContent { + vmrc, err = client.CreateOrUpdateResponder(vmrc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsCreateOrUpdateFuture", "Result", vmrc.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachineRunCommandScriptSource describes the script sources for run command. type VirtualMachineRunCommandScriptSource struct { // Script - Specifies the script content to be executed on the VM. @@ -10330,6 +11714,33 @@ type VirtualMachineRunCommandsDeleteFuture struct { Result func(VirtualMachineRunCommandsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineRunCommandsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineRunCommandsDeleteFuture.Result. +func (future *VirtualMachineRunCommandsDeleteFuture) result(client VirtualMachineRunCommandsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineRunCommandsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineRunCommandsListResult the List run command operation response type VirtualMachineRunCommandsListResult struct { autorest.Response `json:"-"` @@ -10499,6 +11910,39 @@ type VirtualMachineRunCommandsUpdateFuture struct { Result func(VirtualMachineRunCommandsClient) (VirtualMachineRunCommand, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineRunCommandsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineRunCommandsUpdateFuture.Result. 
+func (future *VirtualMachineRunCommandsUpdateFuture) result(client VirtualMachineRunCommandsClient) (vmrc VirtualMachineRunCommand, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineRunCommandsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vmrc.Response.Response, err = future.GetResult(sender); err == nil && vmrc.Response.Response.StatusCode != http.StatusNoContent { + vmrc, err = client.UpdateResponder(vmrc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsUpdateFuture", "Result", vmrc.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachineRunCommandUpdate describes a Virtual Machine run command. type VirtualMachineRunCommandUpdate struct { *VirtualMachineRunCommandProperties `json:"properties,omitempty"` @@ -10560,6 +12004,39 @@ type VirtualMachinesAssessPatchesFuture struct { Result func(VirtualMachinesClient) (VirtualMachineAssessPatchesResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachinesAssessPatchesFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesAssessPatchesFuture.Result. +func (future *VirtualMachinesAssessPatchesFuture) result(client VirtualMachinesClient) (vmapr VirtualMachineAssessPatchesResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesAssessPatchesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesAssessPatchesFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vmapr.Response.Response, err = future.GetResult(sender); err == nil && vmapr.Response.Response.StatusCode != http.StatusNoContent { + vmapr, err = client.AssessPatchesResponder(vmapr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesAssessPatchesFuture", "Result", vmapr.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachineScaleSet describes a Virtual Machine Scale Set. type VirtualMachineScaleSet struct { autorest.Response `json:"-"` @@ -11048,6 +12525,39 @@ type VirtualMachineScaleSetExtensionsCreateOrUpdateFuture struct { Result func(VirtualMachineScaleSetExtensionsClient) (VirtualMachineScaleSetExtension, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *VirtualMachineScaleSetExtensionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetExtensionsCreateOrUpdateFuture.Result. +func (future *VirtualMachineScaleSetExtensionsCreateOrUpdateFuture) result(client VirtualMachineScaleSetExtensionsClient) (vmsse VirtualMachineScaleSetExtension, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vmsse.Response.Response, err = future.GetResult(sender); err == nil && vmsse.Response.Response.StatusCode != http.StatusNoContent { + vmsse, err = client.CreateOrUpdateResponder(vmsse.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture", "Result", vmsse.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachineScaleSetExtensionsDeleteFuture an abstraction for monitoring and retrieving the results of // a long-running operation. type VirtualMachineScaleSetExtensionsDeleteFuture struct { @@ -11057,6 +12567,33 @@ type VirtualMachineScaleSetExtensionsDeleteFuture struct { Result func(VirtualMachineScaleSetExtensionsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetExtensionsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetExtensionsDeleteFuture.Result. +func (future *VirtualMachineScaleSetExtensionsDeleteFuture) result(client VirtualMachineScaleSetExtensionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetExtensionsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetExtensionsUpdateFuture an abstraction for monitoring and retrieving the results of // a long-running operation. type VirtualMachineScaleSetExtensionsUpdateFuture struct { @@ -11066,6 +12603,39 @@ type VirtualMachineScaleSetExtensionsUpdateFuture struct { Result func(VirtualMachineScaleSetExtensionsClient) (VirtualMachineScaleSetExtension, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *VirtualMachineScaleSetExtensionsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetExtensionsUpdateFuture.Result. +func (future *VirtualMachineScaleSetExtensionsUpdateFuture) result(client VirtualMachineScaleSetExtensionsClient) (vmsse VirtualMachineScaleSetExtension, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetExtensionsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vmsse.Response.Response, err = future.GetResult(sender); err == nil && vmsse.Response.Response.StatusCode != http.StatusNoContent { + vmsse, err = client.UpdateResponder(vmsse.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsUpdateFuture", "Result", vmsse.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachineScaleSetExtensionUpdate describes a Virtual Machine Scale Set Extension. type VirtualMachineScaleSetExtensionUpdate struct { // Name - READ-ONLY; The name of the extension. @@ -12251,6 +13821,33 @@ type VirtualMachineScaleSetRollingUpgradesCancelFuture struct { Result func(VirtualMachineScaleSetRollingUpgradesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetRollingUpgradesCancelFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetRollingUpgradesCancelFuture.Result. +func (future *VirtualMachineScaleSetRollingUpgradesCancelFuture) result(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesCancelFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetRollingUpgradesCancelFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture an abstraction for monitoring and // retrieving the results of a long-running operation. type VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture struct { @@ -12260,6 +13857,33 @@ type VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture struct { Result func(VirtualMachineScaleSetRollingUpgradesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture.Result. +func (future *VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture) result(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture an abstraction for monitoring and retrieving // the results of a long-running operation. type VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture struct { @@ -12269,6 +13893,33 @@ type VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture struct { Result func(VirtualMachineScaleSetRollingUpgradesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture.Result. +func (future *VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture) result(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of // a long-running operation. type VirtualMachineScaleSetsCreateOrUpdateFuture struct { @@ -12278,6 +13929,39 @@ type VirtualMachineScaleSetsCreateOrUpdateFuture struct { Result func(VirtualMachineScaleSetsClient) (VirtualMachineScaleSet, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsCreateOrUpdateFuture.Result. 
+func (future *VirtualMachineScaleSetsCreateOrUpdateFuture) result(client VirtualMachineScaleSetsClient) (vmss VirtualMachineScaleSet, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vmss.Response.Response, err = future.GetResult(sender); err == nil && vmss.Response.Response.StatusCode != http.StatusNoContent { + vmss, err = client.CreateOrUpdateResponder(vmss.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsCreateOrUpdateFuture", "Result", vmss.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachineScaleSetsDeallocateFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetsDeallocateFuture struct { @@ -12287,6 +13971,33 @@ type VirtualMachineScaleSetsDeallocateFuture struct { Result func(VirtualMachineScaleSetsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsDeallocateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsDeallocateFuture.Result. +func (future *VirtualMachineScaleSetsDeallocateFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsDeallocateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsDeallocateFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetsDeleteFuture struct { @@ -12296,6 +14007,33 @@ type VirtualMachineScaleSetsDeleteFuture struct { Result func(VirtualMachineScaleSetsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsDeleteFuture.Result. 
+func (future *VirtualMachineScaleSetsDeleteFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetsDeleteInstancesFuture an abstraction for monitoring and retrieving the results of // a long-running operation. type VirtualMachineScaleSetsDeleteInstancesFuture struct { @@ -12305,6 +14043,33 @@ type VirtualMachineScaleSetsDeleteInstancesFuture struct { Result func(VirtualMachineScaleSetsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsDeleteInstancesFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsDeleteInstancesFuture.Result. +func (future *VirtualMachineScaleSetsDeleteInstancesFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsDeleteInstancesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsDeleteInstancesFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetSku describes an available virtual machine scale set sku. type VirtualMachineScaleSetSku struct { // ResourceType - READ-ONLY; The type of resource the sku applies to. @@ -12336,6 +14101,33 @@ type VirtualMachineScaleSetsPerformMaintenanceFuture struct { Result func(VirtualMachineScaleSetsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsPerformMaintenanceFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsPerformMaintenanceFuture.Result. +func (future *VirtualMachineScaleSetsPerformMaintenanceFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsPerformMaintenanceFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsPerformMaintenanceFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetsPowerOffFuture an abstraction for monitoring and retrieving the results of a // long-running operation. 
type VirtualMachineScaleSetsPowerOffFuture struct { @@ -12345,6 +14137,33 @@ type VirtualMachineScaleSetsPowerOffFuture struct { Result func(VirtualMachineScaleSetsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsPowerOffFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsPowerOffFuture.Result. +func (future *VirtualMachineScaleSetsPowerOffFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsPowerOffFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsPowerOffFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetsRedeployFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetsRedeployFuture struct { @@ -12354,6 +14173,33 @@ type VirtualMachineScaleSetsRedeployFuture struct { Result func(VirtualMachineScaleSetsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsRedeployFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsRedeployFuture.Result. +func (future *VirtualMachineScaleSetsRedeployFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsRedeployFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsRedeployFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetsReimageAllFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetsReimageAllFuture struct { @@ -12363,6 +14209,33 @@ type VirtualMachineScaleSetsReimageAllFuture struct { Result func(VirtualMachineScaleSetsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsReimageAllFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsReimageAllFuture.Result. 
+func (future *VirtualMachineScaleSetsReimageAllFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsReimageAllFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsReimageAllFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetsReimageFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetsReimageFuture struct { @@ -12372,6 +14245,33 @@ type VirtualMachineScaleSetsReimageFuture struct { Result func(VirtualMachineScaleSetsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsReimageFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsReimageFuture.Result. +func (future *VirtualMachineScaleSetsReimageFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsReimageFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsReimageFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetsRestartFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetsRestartFuture struct { @@ -12381,14 +14281,68 @@ type VirtualMachineScaleSetsRestartFuture struct { Result func(VirtualMachineScaleSetsClient) (autorest.Response, error) } -// VirtualMachineScaleSetsSetOrchestrationServiceStateFuture an abstraction for monitoring and retrieving -// the results of a long-running operation. -type VirtualMachineScaleSetsSetOrchestrationServiceStateFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(VirtualMachineScaleSetsClient) (autorest.Response, error) -} +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsRestartFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsRestartFuture.Result. 
+func (future *VirtualMachineScaleSetsRestartFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsRestartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsRestartFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineScaleSetsSetOrchestrationServiceStateFuture an abstraction for monitoring and retrieving +// the results of a long-running operation. +type VirtualMachineScaleSetsSetOrchestrationServiceStateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(VirtualMachineScaleSetsClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsSetOrchestrationServiceStateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsSetOrchestrationServiceStateFuture.Result. +func (future *VirtualMachineScaleSetsSetOrchestrationServiceStateFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsSetOrchestrationServiceStateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsSetOrchestrationServiceStateFuture") + return + } + ar.Response = future.Response() + return +} // VirtualMachineScaleSetsStartFuture an abstraction for monitoring and retrieving the results of a // long-running operation. @@ -12399,6 +14353,33 @@ type VirtualMachineScaleSetsStartFuture struct { Result func(VirtualMachineScaleSetsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsStartFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsStartFuture.Result. +func (future *VirtualMachineScaleSetsStartFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsStartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsStartFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetStorageProfile describes a virtual machine scale set storage profile. type VirtualMachineScaleSetStorageProfile struct { // ImageReference - Specifies information about the image to use. 
You can specify information about platform images, marketplace images, or virtual machine images. This element is required when you want to use a platform image, marketplace image, or virtual machine image, but is not used in other creation operations. @@ -12418,6 +14399,39 @@ type VirtualMachineScaleSetsUpdateFuture struct { Result func(VirtualMachineScaleSetsClient) (VirtualMachineScaleSet, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsUpdateFuture.Result. +func (future *VirtualMachineScaleSetsUpdateFuture) result(client VirtualMachineScaleSetsClient) (vmss VirtualMachineScaleSet, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vmss.Response.Response, err = future.GetResult(sender); err == nil && vmss.Response.Response.StatusCode != http.StatusNoContent { + vmss, err = client.UpdateResponder(vmss.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsUpdateFuture", "Result", vmss.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachineScaleSetsUpdateInstancesFuture an abstraction for monitoring and retrieving the results of // a long-running operation. type VirtualMachineScaleSetsUpdateInstancesFuture struct { @@ -12427,6 +14441,33 @@ type VirtualMachineScaleSetsUpdateInstancesFuture struct { Result func(VirtualMachineScaleSetsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetsUpdateInstancesFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetsUpdateInstancesFuture.Result. +func (future *VirtualMachineScaleSetsUpdateInstancesFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsUpdateInstancesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsUpdateInstancesFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetUpdate describes a Virtual Machine Scale Set. type VirtualMachineScaleSetUpdate struct { // Sku - The virtual machine scale set sku. 
@@ -13087,6 +15128,39 @@ type VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture struct { Result func(VirtualMachineScaleSetVMExtensionsClient) (VirtualMachineScaleSetVMExtension, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture.Result. +func (future *VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture) result(client VirtualMachineScaleSetVMExtensionsClient) (vmssve VirtualMachineScaleSetVMExtension, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vmssve.Response.Response, err = future.GetResult(sender); err == nil && vmssve.Response.Response.StatusCode != http.StatusNoContent { + vmssve, err = client.CreateOrUpdateResponder(vmssve.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture", "Result", vmssve.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachineScaleSetVMExtensionsDeleteFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type VirtualMachineScaleSetVMExtensionsDeleteFuture struct { @@ -13096,6 +15170,33 @@ type VirtualMachineScaleSetVMExtensionsDeleteFuture struct { Result func(VirtualMachineScaleSetVMExtensionsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMExtensionsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMExtensionsDeleteFuture.Result. 
+func (future *VirtualMachineScaleSetVMExtensionsDeleteFuture) result(client VirtualMachineScaleSetVMExtensionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMExtensionsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetVMExtensionsListResult the List VMSS VM Extension operation response type VirtualMachineScaleSetVMExtensionsListResult struct { autorest.Response `json:"-"` @@ -13121,6 +15222,39 @@ type VirtualMachineScaleSetVMExtensionsUpdateFuture struct { Result func(VirtualMachineScaleSetVMExtensionsClient) (VirtualMachineScaleSetVMExtension, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMExtensionsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMExtensionsUpdateFuture.Result. +func (future *VirtualMachineScaleSetVMExtensionsUpdateFuture) result(client VirtualMachineScaleSetVMExtensionsClient) (vmssve VirtualMachineScaleSetVMExtension, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMExtensionsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vmssve.Response.Response, err = future.GetResult(sender); err == nil && vmssve.Response.Response.StatusCode != http.StatusNoContent { + vmssve, err = client.UpdateResponder(vmssve.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsUpdateFuture", "Result", vmssve.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachineScaleSetVMExtensionUpdate describes a VMSS VM Extension. type VirtualMachineScaleSetVMExtensionUpdate struct { // Name - READ-ONLY; The name of the extension. @@ -13563,6 +15697,39 @@ type VirtualMachineScaleSetVMRunCommandsCreateOrUpdateFuture struct { Result func(VirtualMachineScaleSetVMRunCommandsClient) (VirtualMachineRunCommand, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMRunCommandsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMRunCommandsCreateOrUpdateFuture.Result. 
+func (future *VirtualMachineScaleSetVMRunCommandsCreateOrUpdateFuture) result(client VirtualMachineScaleSetVMRunCommandsClient) (vmrc VirtualMachineRunCommand, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMRunCommandsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vmrc.Response.Response, err = future.GetResult(sender); err == nil && vmrc.Response.Response.StatusCode != http.StatusNoContent { + vmrc, err = client.CreateOrUpdateResponder(vmrc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsCreateOrUpdateFuture", "Result", vmrc.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachineScaleSetVMRunCommandsDeleteFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type VirtualMachineScaleSetVMRunCommandsDeleteFuture struct { @@ -13572,6 +15739,33 @@ type VirtualMachineScaleSetVMRunCommandsDeleteFuture struct { Result func(VirtualMachineScaleSetVMRunCommandsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMRunCommandsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMRunCommandsDeleteFuture.Result. +func (future *VirtualMachineScaleSetVMRunCommandsDeleteFuture) result(client VirtualMachineScaleSetVMRunCommandsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMRunCommandsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetVMRunCommandsUpdateFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type VirtualMachineScaleSetVMRunCommandsUpdateFuture struct { @@ -13581,6 +15775,39 @@ type VirtualMachineScaleSetVMRunCommandsUpdateFuture struct { Result func(VirtualMachineScaleSetVMRunCommandsClient) (VirtualMachineRunCommand, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMRunCommandsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMRunCommandsUpdateFuture.Result. 
+func (future *VirtualMachineScaleSetVMRunCommandsUpdateFuture) result(client VirtualMachineScaleSetVMRunCommandsClient) (vmrc VirtualMachineRunCommand, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMRunCommandsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vmrc.Response.Response, err = future.GetResult(sender); err == nil && vmrc.Response.Response.StatusCode != http.StatusNoContent { + vmrc, err = client.UpdateResponder(vmrc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsUpdateFuture", "Result", vmrc.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachineScaleSetVMsDeallocateFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetVMsDeallocateFuture struct { @@ -13590,6 +15817,33 @@ type VirtualMachineScaleSetVMsDeallocateFuture struct { Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMsDeallocateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMsDeallocateFuture.Result. +func (future *VirtualMachineScaleSetVMsDeallocateFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsDeallocateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsDeallocateFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetVMsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetVMsDeleteFuture struct { @@ -13599,6 +15853,33 @@ type VirtualMachineScaleSetVMsDeleteFuture struct { Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMsDeleteFuture.Result. 
+func (future *VirtualMachineScaleSetVMsDeleteFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetVMsPerformMaintenanceFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type VirtualMachineScaleSetVMsPerformMaintenanceFuture struct { @@ -13608,6 +15889,33 @@ type VirtualMachineScaleSetVMsPerformMaintenanceFuture struct { Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMsPerformMaintenanceFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMsPerformMaintenanceFuture.Result. +func (future *VirtualMachineScaleSetVMsPerformMaintenanceFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsPerformMaintenanceFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsPerformMaintenanceFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetVMsPowerOffFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetVMsPowerOffFuture struct { @@ -13617,6 +15925,33 @@ type VirtualMachineScaleSetVMsPowerOffFuture struct { Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMsPowerOffFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMsPowerOffFuture.Result. +func (future *VirtualMachineScaleSetVMsPowerOffFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsPowerOffFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsPowerOffFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetVMsRedeployFuture an abstraction for monitoring and retrieving the results of a // long-running operation. 
type VirtualMachineScaleSetVMsRedeployFuture struct { @@ -13626,6 +15961,33 @@ type VirtualMachineScaleSetVMsRedeployFuture struct { Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMsRedeployFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMsRedeployFuture.Result. +func (future *VirtualMachineScaleSetVMsRedeployFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRedeployFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsRedeployFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetVMsReimageAllFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetVMsReimageAllFuture struct { @@ -13635,6 +15997,33 @@ type VirtualMachineScaleSetVMsReimageAllFuture struct { Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMsReimageAllFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMsReimageAllFuture.Result. +func (future *VirtualMachineScaleSetVMsReimageAllFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsReimageAllFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsReimageAllFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetVMsReimageFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetVMsReimageFuture struct { @@ -13644,6 +16033,33 @@ type VirtualMachineScaleSetVMsReimageFuture struct { Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMsReimageFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMsReimageFuture.Result. 
+func (future *VirtualMachineScaleSetVMsReimageFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsReimageFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsReimageFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetVMsRestartFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetVMsRestartFuture struct { @@ -13653,6 +16069,33 @@ type VirtualMachineScaleSetVMsRestartFuture struct { Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMsRestartFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMsRestartFuture.Result. +func (future *VirtualMachineScaleSetVMsRestartFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRestartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsRestartFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetVMsRunCommandFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetVMsRunCommandFuture struct { @@ -13662,6 +16105,39 @@ type VirtualMachineScaleSetVMsRunCommandFuture struct { Result func(VirtualMachineScaleSetVMsClient) (RunCommandResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMsRunCommandFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMsRunCommandFuture.Result. 
+func (future *VirtualMachineScaleSetVMsRunCommandFuture) result(client VirtualMachineScaleSetVMsClient) (rcr RunCommandResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRunCommandFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsRunCommandFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if rcr.Response.Response, err = future.GetResult(sender); err == nil && rcr.Response.Response.StatusCode != http.StatusNoContent { + rcr, err = client.RunCommandResponder(rcr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRunCommandFuture", "Result", rcr.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachineScaleSetVMsStartFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetVMsStartFuture struct { @@ -13671,6 +16147,33 @@ type VirtualMachineScaleSetVMsStartFuture struct { Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMsStartFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMsStartFuture.Result. +func (future *VirtualMachineScaleSetVMsStartFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsStartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsStartFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetVMsUpdateFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachineScaleSetVMsUpdateFuture struct { @@ -13680,6 +16183,39 @@ type VirtualMachineScaleSetVMsUpdateFuture struct { Result func(VirtualMachineScaleSetVMsClient) (VirtualMachineScaleSetVM, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachineScaleSetVMsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachineScaleSetVMsUpdateFuture.Result. 
+func (future *VirtualMachineScaleSetVMsUpdateFuture) result(client VirtualMachineScaleSetVMsClient) (vmssv VirtualMachineScaleSetVM, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vmssv.Response.Response, err = future.GetResult(sender); err == nil && vmssv.Response.Response.StatusCode != http.StatusNoContent { + vmssv, err = client.UpdateResponder(vmssv.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsUpdateFuture", "Result", vmssv.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachinesCaptureFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VirtualMachinesCaptureFuture struct { @@ -13689,6 +16225,39 @@ type VirtualMachinesCaptureFuture struct { Result func(VirtualMachinesClient) (VirtualMachineCaptureResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachinesCaptureFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesCaptureFuture.Result. +func (future *VirtualMachinesCaptureFuture) result(client VirtualMachinesClient) (vmcr VirtualMachineCaptureResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCaptureFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesCaptureFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vmcr.Response.Response, err = future.GetResult(sender); err == nil && vmcr.Response.Response.StatusCode != http.StatusNoContent { + vmcr, err = client.CaptureResponder(vmcr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCaptureFuture", "Result", vmcr.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachinesConvertToManagedDisksFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachinesConvertToManagedDisksFuture struct { @@ -13698,6 +16267,33 @@ type VirtualMachinesConvertToManagedDisksFuture struct { Result func(VirtualMachinesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *VirtualMachinesConvertToManagedDisksFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesConvertToManagedDisksFuture.Result. +func (future *VirtualMachinesConvertToManagedDisksFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesConvertToManagedDisksFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesConvertToManagedDisksFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachinesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachinesCreateOrUpdateFuture struct { @@ -13707,6 +16303,39 @@ type VirtualMachinesCreateOrUpdateFuture struct { Result func(VirtualMachinesClient) (VirtualMachine, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachinesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesCreateOrUpdateFuture.Result. +func (future *VirtualMachinesCreateOrUpdateFuture) result(client VirtualMachinesClient) (VM VirtualMachine, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if VM.Response.Response, err = future.GetResult(sender); err == nil && VM.Response.Response.StatusCode != http.StatusNoContent { + VM, err = client.CreateOrUpdateResponder(VM.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCreateOrUpdateFuture", "Result", VM.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachinesDeallocateFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachinesDeallocateFuture struct { @@ -13716,6 +16345,33 @@ type VirtualMachinesDeallocateFuture struct { Result func(VirtualMachinesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachinesDeallocateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesDeallocateFuture.Result. 
+func (future *VirtualMachinesDeallocateFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesDeallocateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesDeallocateFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachinesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VirtualMachinesDeleteFuture struct { @@ -13725,6 +16381,33 @@ type VirtualMachinesDeleteFuture struct { Result func(VirtualMachinesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachinesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesDeleteFuture.Result. +func (future *VirtualMachinesDeleteFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineSize describes the properties of a VM size. type VirtualMachineSize struct { // Name - The name of the virtual machine size. @@ -13781,6 +16464,33 @@ type VirtualMachinesPerformMaintenanceFuture struct { Result func(VirtualMachinesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachinesPerformMaintenanceFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesPerformMaintenanceFuture.Result. +func (future *VirtualMachinesPerformMaintenanceFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesPerformMaintenanceFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesPerformMaintenanceFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachinesPowerOffFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VirtualMachinesPowerOffFuture struct { @@ -13790,6 +16500,33 @@ type VirtualMachinesPowerOffFuture struct { Result func(VirtualMachinesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *VirtualMachinesPowerOffFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesPowerOffFuture.Result. +func (future *VirtualMachinesPowerOffFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesPowerOffFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesPowerOffFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachinesReapplyFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VirtualMachinesReapplyFuture struct { @@ -13799,6 +16536,33 @@ type VirtualMachinesReapplyFuture struct { Result func(VirtualMachinesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachinesReapplyFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesReapplyFuture.Result. +func (future *VirtualMachinesReapplyFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesReapplyFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesReapplyFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachinesRedeployFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VirtualMachinesRedeployFuture struct { @@ -13808,6 +16572,33 @@ type VirtualMachinesRedeployFuture struct { Result func(VirtualMachinesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachinesRedeployFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesRedeployFuture.Result. +func (future *VirtualMachinesRedeployFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRedeployFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesRedeployFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachinesReimageFuture an abstraction for monitoring and retrieving the results of a long-running // operation. 
type VirtualMachinesReimageFuture struct { @@ -13817,6 +16608,33 @@ type VirtualMachinesReimageFuture struct { Result func(VirtualMachinesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachinesReimageFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesReimageFuture.Result. +func (future *VirtualMachinesReimageFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesReimageFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesReimageFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachinesRestartFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VirtualMachinesRestartFuture struct { @@ -13826,6 +16644,33 @@ type VirtualMachinesRestartFuture struct { Result func(VirtualMachinesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachinesRestartFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesRestartFuture.Result. +func (future *VirtualMachinesRestartFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRestartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesRestartFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachinesRunCommandFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualMachinesRunCommandFuture struct { @@ -13835,6 +16680,39 @@ type VirtualMachinesRunCommandFuture struct { Result func(VirtualMachinesClient) (RunCommandResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachinesRunCommandFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesRunCommandFuture.Result. 
+func (future *VirtualMachinesRunCommandFuture) result(client VirtualMachinesClient) (rcr RunCommandResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRunCommandFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesRunCommandFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if rcr.Response.Response, err = future.GetResult(sender); err == nil && rcr.Response.Response.StatusCode != http.StatusNoContent { + rcr, err = client.RunCommandResponder(rcr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRunCommandFuture", "Result", rcr.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachinesStartFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VirtualMachinesStartFuture struct { @@ -13844,6 +16722,33 @@ type VirtualMachinesStartFuture struct { Result func(VirtualMachinesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachinesStartFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesStartFuture.Result. +func (future *VirtualMachinesStartFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesStartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesStartFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineStatusCodeCount the status code and count of the virtual machine scale set instance view // status summary. type VirtualMachineStatusCodeCount struct { @@ -13862,6 +16767,39 @@ type VirtualMachinesUpdateFuture struct { Result func(VirtualMachinesClient) (VirtualMachine, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualMachinesUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualMachinesUpdateFuture.Result. 
+func (future *VirtualMachinesUpdateFuture) result(client VirtualMachinesClient) (VM VirtualMachine, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if VM.Response.Response, err = future.GetResult(sender); err == nil && VM.Response.Response.StatusCode != http.StatusNoContent { + VM, err = client.UpdateResponder(VM.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesUpdateFuture", "Result", VM.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualMachineUpdate describes a Virtual Machine Update. type VirtualMachineUpdate struct { // Plan - Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/operations.go index a34d64f80ac..33bdb3ef421 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/operations.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/proximityplacementgroups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/proximityplacementgroups.go index d8a81fe79e8..cf0d726c36f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/proximityplacementgroups.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/proximityplacementgroups.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/resourceskus.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/resourceskus.go index 7bbda330f1e..9c967d93aa8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/resourceskus.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/resourceskus.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/snapshots.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/snapshots.go index f56637b2058..388ce70153c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/snapshots.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/snapshots.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -124,30 +113,7 @@ func (client SnapshotsClient) CreateOrUpdateSender(req *http.Request) (future Sn var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SnapshotsClient) (s Snapshot, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.SnapshotsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - s.Response.Response, err = future.GetResult(sender) - if s.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && s.Response.Response.StatusCode != http.StatusNoContent { - s, err = client.CreateOrUpdateResponder(s.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsCreateOrUpdateFuture", "Result", s.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -226,20 +192,7 @@ func (client SnapshotsClient) DeleteSender(req *http.Request) (future SnapshotsD var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SnapshotsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.SnapshotsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -403,30 +356,7 @@ func (client SnapshotsClient) GrantAccessSender(req *http.Request) (future Snaps var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SnapshotsClient) (au AccessURI, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsGrantAccessFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.SnapshotsGrantAccessFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - au.Response.Response, err = future.GetResult(sender) - if au.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsGrantAccessFuture", "Result", nil, "received nil response and error") - } - if err == nil && au.Response.Response.StatusCode != http.StatusNoContent { - au, err = client.GrantAccessResponder(au.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsGrantAccessFuture", "Result", au.Response.Response, "Failure 
responding to request") - } - } - return - } + future.Result = future.result return } @@ -734,20 +664,7 @@ func (client SnapshotsClient) RevokeAccessSender(req *http.Request) (future Snap var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SnapshotsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsRevokeAccessFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.SnapshotsRevokeAccessFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -828,30 +745,7 @@ func (client SnapshotsClient) UpdateSender(req *http.Request) (future SnapshotsU var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SnapshotsClient) (s Snapshot, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.SnapshotsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - s.Response.Response, err = future.GetResult(sender) - if s.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && s.Response.Response.StatusCode != http.StatusNoContent { - s, err = client.UpdateResponder(s.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsUpdateFuture", "Result", s.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/sshpublickeys.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/sshpublickeys.go index 05d43ecd109..34956139335 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/sshpublickeys.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/sshpublickeys.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. 
// Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/usage.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/usage.go index 390ddd6c074..4d87b999973 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/usage.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/usage.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/version.go index 99a83430043..d842497b6ff 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/version.go @@ -2,19 +2,8 @@ package compute import "github.com/Azure/azure-sdk-for-go/version" -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachineextensionimages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachineextensionimages.go index 2659d48cfcb..47a022c8910 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachineextensionimages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachineextensionimages.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. 
All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachineextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachineextensions.go index dc206c0f3e7..54205146a0b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachineextensions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachineextensions.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -109,30 +98,7 @@ func (client VirtualMachineExtensionsClient) CreateOrUpdateSender(req *http.Requ var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineExtensionsClient) (vme VirtualMachineExtension, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineExtensionsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vme.Response.Response, err = future.GetResult(sender) - if vme.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vme.Response.Response.StatusCode != http.StatusNoContent { - vme, err = client.CreateOrUpdateResponder(vme.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsCreateOrUpdateFuture", "Result", vme.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -212,20 +178,7 @@ func (client VirtualMachineExtensionsClient) DeleteSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineExtensionsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineExtensionsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -469,30 +422,7 @@ func (client VirtualMachineExtensionsClient) UpdateSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineExtensionsClient) (vme VirtualMachineExtension, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineExtensionsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vme.Response.Response, err = future.GetResult(sender) - if vme.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vme.Response.Response.StatusCode != http.StatusNoContent { - vme, err = client.UpdateResponder(vme.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsUpdateFuture", "Result", vme.Response.Response, 
"Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachineimages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachineimages.go index f9a6f4aa45c..5311385d167 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachineimages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachineimages.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachineruncommands.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachineruncommands.go index 69be71e1dc6..b3b4cab3f25 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachineruncommands.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachineruncommands.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -110,30 +99,7 @@ func (client VirtualMachineRunCommandsClient) CreateOrUpdateSender(req *http.Req var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineRunCommandsClient) (vmrc VirtualMachineRunCommand, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineRunCommandsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vmrc.Response.Response, err = future.GetResult(sender) - if vmrc.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vmrc.Response.Response.StatusCode != http.StatusNoContent { - vmrc, err = client.CreateOrUpdateResponder(vmrc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsCreateOrUpdateFuture", "Result", vmrc.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -213,20 +179,7 @@ func (client VirtualMachineRunCommandsClient) DeleteSender(req *http.Request) (f var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineRunCommandsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineRunCommandsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -716,30 +669,7 @@ func (client VirtualMachineRunCommandsClient) UpdateSender(req *http.Request) (f var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineRunCommandsClient) (vmrc VirtualMachineRunCommand, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineRunCommandsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vmrc.Response.Response, err = future.GetResult(sender) - if vmrc.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vmrc.Response.Response.StatusCode != http.StatusNoContent { - vmrc, err = client.UpdateResponder(vmrc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsUpdateFuture", 
"Result", vmrc.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachines.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachines.go index a113ca55d56..a2ac95dbcc7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachines.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachines.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -104,30 +93,7 @@ func (client VirtualMachinesClient) AssessPatchesSender(req *http.Request) (futu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (vmapr VirtualMachineAssessPatchesResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesAssessPatchesFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesAssessPatchesFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vmapr.Response.Response, err = future.GetResult(sender) - if vmapr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesAssessPatchesFuture", "Result", nil, "received nil response and error") - } - if err == nil && vmapr.Response.Response.StatusCode != http.StatusNoContent { - vmapr, err = client.AssessPatchesResponder(vmapr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesAssessPatchesFuture", "Result", vmapr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -217,30 +183,7 @@ func (client VirtualMachinesClient) CaptureSender(req *http.Request) (future Vir var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (vmcr VirtualMachineCaptureResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCaptureFuture", "Result", future.Response(), "Polling failure") - return - } - 
if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesCaptureFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vmcr.Response.Response, err = future.GetResult(sender) - if vmcr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCaptureFuture", "Result", nil, "received nil response and error") - } - if err == nil && vmcr.Response.Response.StatusCode != http.StatusNoContent { - vmcr, err = client.CaptureResponder(vmcr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCaptureFuture", "Result", vmcr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -319,20 +262,7 @@ func (client VirtualMachinesClient) ConvertToManagedDisksSender(req *http.Reques var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesConvertToManagedDisksFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesConvertToManagedDisksFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -435,30 +365,7 @@ func (client VirtualMachinesClient) CreateOrUpdateSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (VM VirtualMachine, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - VM.Response.Response, err = future.GetResult(sender) - if VM.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && VM.Response.Response.StatusCode != http.StatusNoContent { - VM, err = client.CreateOrUpdateResponder(VM.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCreateOrUpdateFuture", "Result", VM.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -537,20 +444,7 @@ func (client VirtualMachinesClient) DeallocateSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesDeallocateFuture", "Result", future.Response(), "Polling failure") - return - } - if 
!done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesDeallocateFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -631,20 +525,7 @@ func (client VirtualMachinesClient) DeleteSender(req *http.Request) (future Virt var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1390,20 +1271,7 @@ func (client VirtualMachinesClient) PerformMaintenanceSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesPerformMaintenanceFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesPerformMaintenanceFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1489,20 +1357,7 @@ func (client VirtualMachinesClient) PowerOffSender(req *http.Request) (future Vi var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesPowerOffFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesPowerOffFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1579,20 +1434,7 @@ func (client VirtualMachinesClient) ReapplySender(req *http.Request) (future Vir var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesReapplyFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesReapplyFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1669,20 +1511,7 @@ func (client VirtualMachinesClient) RedeploySender(req *http.Request) (future Vi var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRedeployFuture", 
"Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesRedeployFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1765,20 +1594,7 @@ func (client VirtualMachinesClient) ReimageSender(req *http.Request) (future Vir var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesReimageFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesReimageFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1855,20 +1671,7 @@ func (client VirtualMachinesClient) RestartSender(req *http.Request) (future Vir var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRestartFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesRestartFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -2036,30 +1839,7 @@ func (client VirtualMachinesClient) RunCommandSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (rcr RunCommandResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRunCommandFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesRunCommandFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - rcr.Response.Response, err = future.GetResult(sender) - if rcr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRunCommandFuture", "Result", nil, "received nil response and error") - } - if err == nil && rcr.Response.Response.StatusCode != http.StatusNoContent { - rcr, err = client.RunCommandResponder(rcr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRunCommandFuture", "Result", rcr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -2212,20 +1992,7 @@ func (client VirtualMachinesClient) StartSender(req *http.Request) (future Virtu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, 
"compute.VirtualMachinesStartFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesStartFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -2305,30 +2072,7 @@ func (client VirtualMachinesClient) UpdateSender(req *http.Request) (future Virt var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachinesClient) (VM VirtualMachine, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - VM.Response.Response, err = future.GetResult(sender) - if VM.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && VM.Response.Response.StatusCode != http.StatusNoContent { - VM, err = client.UpdateResponder(VM.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesUpdateFuture", "Result", VM.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinescalesetextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinescalesetextensions.go index ebdbd60b217..69ac2cb71f8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinescalesetextensions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinescalesetextensions.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -110,30 +99,7 @@ func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdateSender(req *h var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetExtensionsClient) (vmsse VirtualMachineScaleSetExtension, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vmsse.Response.Response, err = future.GetResult(sender) - if vmsse.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vmsse.Response.Response.StatusCode != http.StatusNoContent { - vmsse, err = client.CreateOrUpdateResponder(vmsse.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture", "Result", vmsse.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -213,20 +179,7 @@ func (client VirtualMachineScaleSetExtensionsClient) DeleteSender(req *http.Requ var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetExtensionsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetExtensionsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -510,30 +463,7 @@ func (client VirtualMachineScaleSetExtensionsClient) UpdateSender(req *http.Requ var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetExtensionsClient) (vmsse VirtualMachineScaleSetExtension, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetExtensionsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vmsse.Response.Response, err = future.GetResult(sender) - if vmsse.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vmsse.Response.Response.StatusCode != http.StatusNoContent { - vmsse, err = client.UpdateResponder(vmsse.Response.Response) - 
if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsUpdateFuture", "Result", vmsse.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinescalesetrollingupgrades.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinescalesetrollingupgrades.go index 6fc792f3a49..6e2a1a5f0b3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinescalesetrollingupgrades.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinescalesetrollingupgrades.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -105,20 +94,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) CancelSender(req *http var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesCancelFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetRollingUpgradesCancelFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -273,20 +249,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) StartExtensionUpgradeS var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -364,20 +327,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradeSender(r var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf 
- future.Result = func(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinescalesets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinescalesets.go index ea8a42bb045..43b9416b236 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinescalesets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinescalesets.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -210,30 +199,7 @@ func (client VirtualMachineScaleSetsClient) CreateOrUpdateSender(req *http.Reque var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (vmss VirtualMachineScaleSet, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vmss.Response.Response, err = future.GetResult(sender) - if vmss.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vmss.Response.Response.StatusCode != http.StatusNoContent { - vmss, err = client.CreateOrUpdateResponder(vmss.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsCreateOrUpdateFuture", "Result", vmss.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -318,20 +284,7 @@ func (client VirtualMachineScaleSetsClient) DeallocateSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsDeallocateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsDeallocateFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -408,20 +361,7 @@ func (client VirtualMachineScaleSetsClient) DeleteSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -507,20 +447,7 @@ func (client VirtualMachineScaleSetsClient) DeleteInstancesSender(req *http.Requ var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsDeleteInstancesFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = 
azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsDeleteInstancesFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1304,20 +1231,7 @@ func (client VirtualMachineScaleSetsClient) PerformMaintenanceSender(req *http.R var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsPerformMaintenanceFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsPerformMaintenanceFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1409,20 +1323,7 @@ func (client VirtualMachineScaleSetsClient) PowerOffSender(req *http.Request) (f var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsPowerOffFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsPowerOffFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1506,20 +1407,7 @@ func (client VirtualMachineScaleSetsClient) RedeploySender(req *http.Request) (f var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsRedeployFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsRedeployFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1603,20 +1491,7 @@ func (client VirtualMachineScaleSetsClient) ReimageSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsReimageFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsReimageFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1700,20 +1575,7 @@ func (client VirtualMachineScaleSetsClient) ReimageAllSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), 
client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsReimageAllFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsReimageAllFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1796,20 +1658,7 @@ func (client VirtualMachineScaleSetsClient) RestartSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsRestartFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsRestartFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1889,20 +1738,7 @@ func (client VirtualMachineScaleSetsClient) SetOrchestrationServiceStateSender(r var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsSetOrchestrationServiceStateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsSetOrchestrationServiceStateFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1985,20 +1821,7 @@ func (client VirtualMachineScaleSetsClient) StartSender(req *http.Request) (futu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsStartFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsStartFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -2078,30 +1901,7 @@ func (client VirtualMachineScaleSetsClient) UpdateSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (vmss VirtualMachineScaleSet, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vmss.Response.Response, err = future.GetResult(sender) - if vmss.Response.Response == nil 
&& err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vmss.Response.Response.StatusCode != http.StatusNoContent { - vmss, err = client.UpdateResponder(vmss.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsUpdateFuture", "Result", vmss.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -2188,20 +1988,7 @@ func (client VirtualMachineScaleSetsClient) UpdateInstancesSender(req *http.Requ var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsUpdateInstancesFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsUpdateInstancesFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinescalesetvmextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinescalesetvmextensions.go index 6bc28079023..0591e5f4255 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinescalesetvmextensions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinescalesetvmextensions.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -114,30 +103,7 @@ func (client VirtualMachineScaleSetVMExtensionsClient) CreateOrUpdateSender(req var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMExtensionsClient) (vmssve VirtualMachineScaleSetVMExtension, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vmssve.Response.Response, err = future.GetResult(sender) - if vmssve.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vmssve.Response.Response.StatusCode != http.StatusNoContent { - vmssve, err = client.CreateOrUpdateResponder(vmssve.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture", "Result", vmssve.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -219,20 +185,7 @@ func (client VirtualMachineScaleSetVMExtensionsClient) DeleteSender(req *http.Re var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMExtensionsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMExtensionsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -484,30 +437,7 @@ func (client VirtualMachineScaleSetVMExtensionsClient) UpdateSender(req *http.Re var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMExtensionsClient) (vmssve VirtualMachineScaleSetVMExtension, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMExtensionsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vmssve.Response.Response, err = future.GetResult(sender) - if vmssve.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vmssve.Response.Response.StatusCode != http.StatusNoContent { - vmssve, err = 
client.UpdateResponder(vmssve.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsUpdateFuture", "Result", vmssve.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinescalesetvmruncommands.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinescalesetvmruncommands.go index ce00f29c2c4..7028c03183e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinescalesetvmruncommands.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinescalesetvmruncommands.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -112,30 +101,7 @@ func (client VirtualMachineScaleSetVMRunCommandsClient) CreateOrUpdateSender(req var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMRunCommandsClient) (vmrc VirtualMachineRunCommand, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMRunCommandsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vmrc.Response.Response, err = future.GetResult(sender) - if vmrc.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vmrc.Response.Response.StatusCode != http.StatusNoContent { - vmrc, err = client.CreateOrUpdateResponder(vmrc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsCreateOrUpdateFuture", "Result", vmrc.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -217,20 +183,7 @@ func (client VirtualMachineScaleSetVMRunCommandsClient) DeleteSender(req *http.R var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMRunCommandsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMRunCommandsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -522,30 +475,7 @@ func (client VirtualMachineScaleSetVMRunCommandsClient) UpdateSender(req *http.R var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMRunCommandsClient) (vmrc VirtualMachineRunCommand, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMRunCommandsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vmrc.Response.Response, err = future.GetResult(sender) - if vmrc.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vmrc.Response.Response.StatusCode != http.StatusNoContent { - vmrc, err = 
client.UpdateResponder(vmrc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsUpdateFuture", "Result", vmrc.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinescalesetvms.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinescalesetvms.go index dab41c1cd29..d74e440fe62 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinescalesetvms.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinescalesetvms.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -109,20 +98,7 @@ func (client VirtualMachineScaleSetVMsClient) DeallocateSender(req *http.Request var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsDeallocateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsDeallocateFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -201,20 +177,7 @@ func (client VirtualMachineScaleSetVMsClient) DeleteSender(req *http.Request) (f var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -584,20 +547,7 @@ func (client VirtualMachineScaleSetVMsClient) PerformMaintenanceSender(req *http var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err 
error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsPerformMaintenanceFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsPerformMaintenanceFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -685,20 +635,7 @@ func (client VirtualMachineScaleSetVMsClient) PowerOffSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsPowerOffFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsPowerOffFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -778,20 +715,7 @@ func (client VirtualMachineScaleSetVMsClient) RedeploySender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRedeployFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsRedeployFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -876,20 +800,7 @@ func (client VirtualMachineScaleSetVMsClient) ReimageSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsReimageFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsReimageFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -969,20 +880,7 @@ func (client VirtualMachineScaleSetVMsClient) ReimageAllSender(req *http.Request var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsReimageAllFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsReimageAllFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1061,20 +959,7 @@ func (client VirtualMachineScaleSetVMsClient) 
RestartSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRestartFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsRestartFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1247,30 +1132,7 @@ func (client VirtualMachineScaleSetVMsClient) RunCommandSender(req *http.Request var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMsClient) (rcr RunCommandResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRunCommandFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsRunCommandFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - rcr.Response.Response, err = future.GetResult(sender) - if rcr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRunCommandFuture", "Result", nil, "received nil response and error") - } - if err == nil && rcr.Response.Response.StatusCode != http.StatusNoContent { - rcr, err = client.RunCommandResponder(rcr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRunCommandFuture", "Result", rcr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1427,20 +1289,7 @@ func (client VirtualMachineScaleSetVMsClient) StartSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsStartFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsStartFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1547,30 +1396,7 @@ func (client VirtualMachineScaleSetVMsClient) UpdateSender(req *http.Request) (f var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualMachineScaleSetVMsClient) (vmssv VirtualMachineScaleSetVM, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsUpdateFuture") - return - } - 
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vmssv.Response.Response, err = future.GetResult(sender) - if vmssv.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vmssv.Response.Response.StatusCode != http.StatusNoContent { - vmssv, err = client.UpdateResponder(vmssv.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsUpdateFuture", "Result", vmssv.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinesizes.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinesizes.go index 1e5ac2c6638..c779fef1c54 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinesizes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute/virtualmachinesizes.go @@ -1,18 +1,7 @@ package compute -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/CHANGELOG.md index a2da711a739..aaa52c83a9d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/CHANGELOG.md @@ -1,5 +1,51 @@ Generated from https://github.com/Azure/azure-rest-api-specs/tree/3c764635e7d442b3e74caf593029fcd440b3ef82//specification/cosmos-db/resource-manager/readme.md tag: `package-2019-08` -Code generator @microsoft.azure/autorest.go@2.1.175 +Code generator @microsoft.azure/autorest.go@2.1.178 +### New Funcs + +1. *CassandraResourcesCreateUpdateCassandraKeyspaceFuture.UnmarshalJSON([]byte) error +1. *CassandraResourcesCreateUpdateCassandraTableFuture.UnmarshalJSON([]byte) error +1. *CassandraResourcesDeleteCassandraKeyspaceFuture.UnmarshalJSON([]byte) error +1. *CassandraResourcesDeleteCassandraTableFuture.UnmarshalJSON([]byte) error +1. *CassandraResourcesUpdateCassandraKeyspaceThroughputFuture.UnmarshalJSON([]byte) error +1. *CassandraResourcesUpdateCassandraTableThroughputFuture.UnmarshalJSON([]byte) error +1. 
*DatabaseAccountsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *DatabaseAccountsDeleteFuture.UnmarshalJSON([]byte) error +1. *DatabaseAccountsFailoverPriorityChangeFuture.UnmarshalJSON([]byte) error +1. *DatabaseAccountsOfflineRegionFuture.UnmarshalJSON([]byte) error +1. *DatabaseAccountsOnlineRegionFuture.UnmarshalJSON([]byte) error +1. *DatabaseAccountsRegenerateKeyFuture.UnmarshalJSON([]byte) error +1. *DatabaseAccountsUpdateFuture.UnmarshalJSON([]byte) error +1. *GremlinResourcesCreateUpdateGremlinDatabaseFuture.UnmarshalJSON([]byte) error +1. *GremlinResourcesCreateUpdateGremlinGraphFuture.UnmarshalJSON([]byte) error +1. *GremlinResourcesDeleteGremlinDatabaseFuture.UnmarshalJSON([]byte) error +1. *GremlinResourcesDeleteGremlinGraphFuture.UnmarshalJSON([]byte) error +1. *GremlinResourcesUpdateGremlinDatabaseThroughputFuture.UnmarshalJSON([]byte) error +1. *GremlinResourcesUpdateGremlinGraphThroughputFuture.UnmarshalJSON([]byte) error +1. *MongoDBResourcesCreateUpdateMongoDBCollectionFuture.UnmarshalJSON([]byte) error +1. *MongoDBResourcesCreateUpdateMongoDBDatabaseFuture.UnmarshalJSON([]byte) error +1. *MongoDBResourcesDeleteMongoDBCollectionFuture.UnmarshalJSON([]byte) error +1. *MongoDBResourcesDeleteMongoDBDatabaseFuture.UnmarshalJSON([]byte) error +1. *MongoDBResourcesUpdateMongoDBCollectionThroughputFuture.UnmarshalJSON([]byte) error +1. *MongoDBResourcesUpdateMongoDBDatabaseThroughputFuture.UnmarshalJSON([]byte) error +1. *NotebookWorkspacesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *NotebookWorkspacesDeleteFuture.UnmarshalJSON([]byte) error +1. *NotebookWorkspacesRegenerateAuthTokenFuture.UnmarshalJSON([]byte) error +1. *NotebookWorkspacesStartFuture.UnmarshalJSON([]byte) error +1. *SQLResourcesCreateUpdateSQLContainerFuture.UnmarshalJSON([]byte) error +1. *SQLResourcesCreateUpdateSQLDatabaseFuture.UnmarshalJSON([]byte) error +1. *SQLResourcesCreateUpdateSQLStoredProcedureFuture.UnmarshalJSON([]byte) error +1. *SQLResourcesCreateUpdateSQLTriggerFuture.UnmarshalJSON([]byte) error +1. *SQLResourcesCreateUpdateSQLUserDefinedFunctionFuture.UnmarshalJSON([]byte) error +1. *SQLResourcesDeleteSQLContainerFuture.UnmarshalJSON([]byte) error +1. *SQLResourcesDeleteSQLDatabaseFuture.UnmarshalJSON([]byte) error +1. *SQLResourcesDeleteSQLStoredProcedureFuture.UnmarshalJSON([]byte) error +1. *SQLResourcesDeleteSQLTriggerFuture.UnmarshalJSON([]byte) error +1. *SQLResourcesDeleteSQLUserDefinedFunctionFuture.UnmarshalJSON([]byte) error +1. *SQLResourcesUpdateSQLContainerThroughputFuture.UnmarshalJSON([]byte) error +1. *SQLResourcesUpdateSQLDatabaseThroughputFuture.UnmarshalJSON([]byte) error +1. *TableResourcesCreateUpdateTableFuture.UnmarshalJSON([]byte) error +1. *TableResourcesDeleteTableFuture.UnmarshalJSON([]byte) error +1. 
*TableResourcesUpdateTableThroughputFuture.UnmarshalJSON([]byte) error diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/_meta.json new file mode 100644 index 00000000000..50753ce70ae --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/_meta.json @@ -0,0 +1,10 @@ +{ + "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", + "readme": "/_/azure-rest-api-specs/specification/cosmos-db/resource-manager/readme.md", + "tag": "package-2019-08", + "use": "@microsoft.azure/autorest.go@2.1.178", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.178 --go --verbose --go-sdk-folder=/_/azure-sdk-for-go --tag=package-2019-08 --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/cosmos-db/resource-manager/readme.md", + "additional_properties": { + "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION" + } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/cassandraresources.go b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/cassandraresources.go index c321ef1ecd9..0e4828bf067 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/cassandraresources.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/cassandraresources.go @@ -1,18 +1,7 @@ package documentdb -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
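[Editor's note] The documentdb hunks that follow repeat the same future.result rewrite shown above for the compute clients. Caller code is unaffected by the refactor; the sketch below is a hypothetical helper (its name, arguments, and error handling are illustrative, not taken from this repository) showing the usual way such a future is driven, with the new UnmarshalJSON funcs only mattering when a future is serialized and later rebuilt.

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute"
)

// startVM is a hypothetical helper: kick off the long-running Start call,
// wait for the service to report a terminal state, then surface the result.
func startVM(ctx context.Context, client compute.VirtualMachinesClient, resourceGroup, vmName string) error {
	future, err := client.Start(ctx, resourceGroup, vmName)
	if err != nil {
		return err
	}
	// Block, polling with the client's configured settings, until done.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return err
	}
	// Result now dispatches to the shared, generated future.result method.
	_, err = future.Result(client)
	return err
}
```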
@@ -130,30 +119,7 @@ func (client CassandraResourcesClient) CreateUpdateCassandraKeyspaceSender(req * var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client CassandraResourcesClient) (ckgr CassandraKeyspaceGetResults, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.CassandraResourcesCreateUpdateCassandraKeyspaceFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.CassandraResourcesCreateUpdateCassandraKeyspaceFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ckgr.Response.Response, err = future.GetResult(sender) - if ckgr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "documentdb.CassandraResourcesCreateUpdateCassandraKeyspaceFuture", "Result", nil, "received nil response and error") - } - if err == nil && ckgr.Response.Response.StatusCode != http.StatusNoContent { - ckgr, err = client.CreateUpdateCassandraKeyspaceResponder(ckgr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.CassandraResourcesCreateUpdateCassandraKeyspaceFuture", "Result", ckgr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -258,30 +224,7 @@ func (client CassandraResourcesClient) CreateUpdateCassandraTableSender(req *htt var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client CassandraResourcesClient) (ctgr CassandraTableGetResults, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.CassandraResourcesCreateUpdateCassandraTableFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.CassandraResourcesCreateUpdateCassandraTableFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ctgr.Response.Response, err = future.GetResult(sender) - if ctgr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "documentdb.CassandraResourcesCreateUpdateCassandraTableFuture", "Result", nil, "received nil response and error") - } - if err == nil && ctgr.Response.Response.StatusCode != http.StatusNoContent { - ctgr, err = client.CreateUpdateCassandraTableResponder(ctgr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.CassandraResourcesCreateUpdateCassandraTableFuture", "Result", ctgr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -375,20 +318,7 @@ func (client CassandraResourcesClient) DeleteCassandraKeyspaceSender(req *http.R var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client CassandraResourcesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.CassandraResourcesDeleteCassandraKeyspaceFuture", 
"Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.CassandraResourcesDeleteCassandraKeyspaceFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -483,20 +413,7 @@ func (client CassandraResourcesClient) DeleteCassandraTableSender(req *http.Requ var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client CassandraResourcesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.CassandraResourcesDeleteCassandraTableFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.CassandraResourcesDeleteCassandraTableFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1155,30 +1072,7 @@ func (client CassandraResourcesClient) UpdateCassandraKeyspaceThroughputSender(r var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client CassandraResourcesClient) (tsgr ThroughputSettingsGetResults, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.CassandraResourcesUpdateCassandraKeyspaceThroughputFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.CassandraResourcesUpdateCassandraKeyspaceThroughputFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - tsgr.Response.Response, err = future.GetResult(sender) - if tsgr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "documentdb.CassandraResourcesUpdateCassandraKeyspaceThroughputFuture", "Result", nil, "received nil response and error") - } - if err == nil && tsgr.Response.Response.StatusCode != http.StatusNoContent { - tsgr, err = client.UpdateCassandraKeyspaceThroughputResponder(tsgr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.CassandraResourcesUpdateCassandraKeyspaceThroughputFuture", "Result", tsgr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1283,30 +1177,7 @@ func (client CassandraResourcesClient) UpdateCassandraTableThroughputSender(req var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client CassandraResourcesClient) (tsgr ThroughputSettingsGetResults, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.CassandraResourcesUpdateCassandraTableThroughputFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.CassandraResourcesUpdateCassandraTableThroughputFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - tsgr.Response.Response, err = future.GetResult(sender) - if tsgr.Response.Response == nil && err 
== nil { - err = autorest.NewErrorWithError(err, "documentdb.CassandraResourcesUpdateCassandraTableThroughputFuture", "Result", nil, "received nil response and error") - } - if err == nil && tsgr.Response.Response.StatusCode != http.StatusNoContent { - tsgr, err = client.UpdateCassandraTableThroughputResponder(tsgr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.CassandraResourcesUpdateCassandraTableThroughputFuture", "Result", tsgr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/client.go index 126e99bc5f6..24c25493a5f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/client.go @@ -3,19 +3,8 @@ // Azure Cosmos DB Database Service Resource Provider REST API package documentdb -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/collection.go b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/collection.go index 92c08a98477..bb21cc70da5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/collection.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/collection.go @@ -1,18 +1,7 @@ package documentdb -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/collectionpartition.go b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/collectionpartition.go index 77d80fd3fd5..c201ae3399a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/collectionpartition.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/collectionpartition.go @@ -1,18 +1,7 @@ package documentdb -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/collectionpartitionregion.go b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/collectionpartitionregion.go index e59c95b0e96..1eaf4887be5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/collectionpartitionregion.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/collectionpartitionregion.go @@ -1,18 +1,7 @@ package documentdb -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/collectionregion.go b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/collectionregion.go index fa7cb14ef92..96332d537d8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/collectionregion.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/collectionregion.go @@ -1,18 +1,7 @@ package documentdb -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/database.go b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/database.go index 6ffaccb93ff..1f9b8372ff6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/database.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/database.go @@ -1,18 +1,7 @@ package documentdb -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/databaseaccountregion.go b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/databaseaccountregion.go index 4b9c211ca3f..3fe1f8f9388 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/databaseaccountregion.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/databaseaccountregion.go @@ -1,18 +1,7 @@ package documentdb -// Copyright (c) Microsoft and contributors. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/databaseaccounts.go b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/databaseaccounts.go index eddbf83ea76..c7683e812d1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/databaseaccounts.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/databaseaccounts.go @@ -1,18 +1,7 @@ package documentdb -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -219,30 +208,7 @@ func (client DatabaseAccountsClient) CreateOrUpdateSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DatabaseAccountsClient) (dagr DatabaseAccountGetResults, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.DatabaseAccountsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - dagr.Response.Response, err = future.GetResult(sender) - if dagr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && dagr.Response.Response.StatusCode != http.StatusNoContent { - dagr, err = client.CreateOrUpdateResponder(dagr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsCreateOrUpdateFuture", "Result", dagr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -334,20 +300,7 @@ func (client DatabaseAccountsClient) DeleteSender(req *http.Request) (future Dat var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DatabaseAccountsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.DatabaseAccountsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -445,20 +398,7 @@ func (client DatabaseAccountsClient) FailoverPriorityChangeSender(req *http.Requ var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DatabaseAccountsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsFailoverPriorityChangeFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.DatabaseAccountsFailoverPriorityChangeFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1444,20 +1384,7 @@ func (client DatabaseAccountsClient) OfflineRegionSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DatabaseAccountsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsOfflineRegionFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = 
azure.NewAsyncOpIncompleteError("documentdb.DatabaseAccountsOfflineRegionFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1553,20 +1480,7 @@ func (client DatabaseAccountsClient) OnlineRegionSender(req *http.Request) (futu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DatabaseAccountsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsOnlineRegionFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.DatabaseAccountsOnlineRegionFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1660,20 +1574,7 @@ func (client DatabaseAccountsClient) RegenerateKeySender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DatabaseAccountsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsRegenerateKeyFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.DatabaseAccountsRegenerateKeyFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1767,30 +1668,7 @@ func (client DatabaseAccountsClient) UpdateSender(req *http.Request) (future Dat var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DatabaseAccountsClient) (dagr DatabaseAccountGetResults, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.DatabaseAccountsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - dagr.Response.Response, err = future.GetResult(sender) - if dagr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && dagr.Response.Response.StatusCode != http.StatusNoContent { - dagr, err = client.UpdateResponder(dagr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsUpdateFuture", "Result", dagr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/enums.go index 8546eff9542..4cfd627a291 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/enums.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/enums.go @@ -1,18 +1,7 @@ package 
documentdb -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/gremlinresources.go b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/gremlinresources.go index fe9e6cde62c..04fc01cfa8a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/gremlinresources.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/gremlinresources.go @@ -1,18 +1,7 @@ package documentdb -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -130,30 +119,7 @@ func (client GremlinResourcesClient) CreateUpdateGremlinDatabaseSender(req *http var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GremlinResourcesClient) (gdgr GremlinDatabaseGetResults, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.GremlinResourcesCreateUpdateGremlinDatabaseFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.GremlinResourcesCreateUpdateGremlinDatabaseFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - gdgr.Response.Response, err = future.GetResult(sender) - if gdgr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "documentdb.GremlinResourcesCreateUpdateGremlinDatabaseFuture", "Result", nil, "received nil response and error") - } - if err == nil && gdgr.Response.Response.StatusCode != http.StatusNoContent { - gdgr, err = client.CreateUpdateGremlinDatabaseResponder(gdgr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.GremlinResourcesCreateUpdateGremlinDatabaseFuture", "Result", gdgr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -265,30 +231,7 @@ func (client GremlinResourcesClient) CreateUpdateGremlinGraphSender(req *http.Re var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GremlinResourcesClient) (gggr GremlinGraphGetResults, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.GremlinResourcesCreateUpdateGremlinGraphFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.GremlinResourcesCreateUpdateGremlinGraphFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - gggr.Response.Response, err = future.GetResult(sender) - if gggr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "documentdb.GremlinResourcesCreateUpdateGremlinGraphFuture", "Result", nil, "received nil response and error") - } - if err == nil && gggr.Response.Response.StatusCode != http.StatusNoContent { - gggr, err = client.CreateUpdateGremlinGraphResponder(gggr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.GremlinResourcesCreateUpdateGremlinGraphFuture", "Result", gggr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -382,20 +325,7 @@ func (client GremlinResourcesClient) DeleteGremlinDatabaseSender(req *http.Reque var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GremlinResourcesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.GremlinResourcesDeleteGremlinDatabaseFuture", "Result", future.Response(), "Polling failure") - 
return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.GremlinResourcesDeleteGremlinDatabaseFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -490,20 +420,7 @@ func (client GremlinResourcesClient) DeleteGremlinGraphSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GremlinResourcesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.GremlinResourcesDeleteGremlinGraphFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.GremlinResourcesDeleteGremlinGraphFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1162,30 +1079,7 @@ func (client GremlinResourcesClient) UpdateGremlinDatabaseThroughputSender(req * var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GremlinResourcesClient) (tsgr ThroughputSettingsGetResults, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.GremlinResourcesUpdateGremlinDatabaseThroughputFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.GremlinResourcesUpdateGremlinDatabaseThroughputFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - tsgr.Response.Response, err = future.GetResult(sender) - if tsgr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "documentdb.GremlinResourcesUpdateGremlinDatabaseThroughputFuture", "Result", nil, "received nil response and error") - } - if err == nil && tsgr.Response.Response.StatusCode != http.StatusNoContent { - tsgr, err = client.UpdateGremlinDatabaseThroughputResponder(tsgr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.GremlinResourcesUpdateGremlinDatabaseThroughputFuture", "Result", tsgr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1289,30 +1183,7 @@ func (client GremlinResourcesClient) UpdateGremlinGraphThroughputSender(req *htt var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GremlinResourcesClient) (tsgr ThroughputSettingsGetResults, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.GremlinResourcesUpdateGremlinGraphThroughputFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.GremlinResourcesUpdateGremlinGraphThroughputFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - tsgr.Response.Response, err = future.GetResult(sender) - if tsgr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, 
"documentdb.GremlinResourcesUpdateGremlinGraphThroughputFuture", "Result", nil, "received nil response and error") - } - if err == nil && tsgr.Response.Response.StatusCode != http.StatusNoContent { - tsgr, err = client.UpdateGremlinGraphThroughputResponder(tsgr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.GremlinResourcesUpdateGremlinGraphThroughputFuture", "Result", tsgr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/models.go index 7c1909de28d..4a1322eb50d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/models.go @@ -1,18 +1,7 @@ package documentdb -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -348,6 +337,39 @@ type CassandraResourcesCreateUpdateCassandraKeyspaceFuture struct { Result func(CassandraResourcesClient) (CassandraKeyspaceGetResults, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *CassandraResourcesCreateUpdateCassandraKeyspaceFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for CassandraResourcesCreateUpdateCassandraKeyspaceFuture.Result. 
+func (future *CassandraResourcesCreateUpdateCassandraKeyspaceFuture) result(client CassandraResourcesClient) (ckgr CassandraKeyspaceGetResults, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.CassandraResourcesCreateUpdateCassandraKeyspaceFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.CassandraResourcesCreateUpdateCassandraKeyspaceFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ckgr.Response.Response, err = future.GetResult(sender); err == nil && ckgr.Response.Response.StatusCode != http.StatusNoContent { + ckgr, err = client.CreateUpdateCassandraKeyspaceResponder(ckgr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.CassandraResourcesCreateUpdateCassandraKeyspaceFuture", "Result", ckgr.Response.Response, "Failure responding to request") + } + } + return +} + // CassandraResourcesCreateUpdateCassandraTableFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type CassandraResourcesCreateUpdateCassandraTableFuture struct { @@ -357,6 +379,39 @@ type CassandraResourcesCreateUpdateCassandraTableFuture struct { Result func(CassandraResourcesClient) (CassandraTableGetResults, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *CassandraResourcesCreateUpdateCassandraTableFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for CassandraResourcesCreateUpdateCassandraTableFuture.Result. +func (future *CassandraResourcesCreateUpdateCassandraTableFuture) result(client CassandraResourcesClient) (ctgr CassandraTableGetResults, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.CassandraResourcesCreateUpdateCassandraTableFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.CassandraResourcesCreateUpdateCassandraTableFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ctgr.Response.Response, err = future.GetResult(sender); err == nil && ctgr.Response.Response.StatusCode != http.StatusNoContent { + ctgr, err = client.CreateUpdateCassandraTableResponder(ctgr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.CassandraResourcesCreateUpdateCassandraTableFuture", "Result", ctgr.Response.Response, "Failure responding to request") + } + } + return +} + // CassandraResourcesDeleteCassandraKeyspaceFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type CassandraResourcesDeleteCassandraKeyspaceFuture struct { @@ -366,6 +421,33 @@ type CassandraResourcesDeleteCassandraKeyspaceFuture struct { Result func(CassandraResourcesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *CassandraResourcesDeleteCassandraKeyspaceFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for CassandraResourcesDeleteCassandraKeyspaceFuture.Result. +func (future *CassandraResourcesDeleteCassandraKeyspaceFuture) result(client CassandraResourcesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.CassandraResourcesDeleteCassandraKeyspaceFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.CassandraResourcesDeleteCassandraKeyspaceFuture") + return + } + ar.Response = future.Response() + return +} + // CassandraResourcesDeleteCassandraTableFuture an abstraction for monitoring and retrieving the results of // a long-running operation. type CassandraResourcesDeleteCassandraTableFuture struct { @@ -375,6 +457,33 @@ type CassandraResourcesDeleteCassandraTableFuture struct { Result func(CassandraResourcesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *CassandraResourcesDeleteCassandraTableFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for CassandraResourcesDeleteCassandraTableFuture.Result. +func (future *CassandraResourcesDeleteCassandraTableFuture) result(client CassandraResourcesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.CassandraResourcesDeleteCassandraTableFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.CassandraResourcesDeleteCassandraTableFuture") + return + } + ar.Response = future.Response() + return +} + // CassandraResourcesUpdateCassandraKeyspaceThroughputFuture an abstraction for monitoring and retrieving // the results of a long-running operation. type CassandraResourcesUpdateCassandraKeyspaceThroughputFuture struct { @@ -384,6 +493,39 @@ type CassandraResourcesUpdateCassandraKeyspaceThroughputFuture struct { Result func(CassandraResourcesClient) (ThroughputSettingsGetResults, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *CassandraResourcesUpdateCassandraKeyspaceThroughputFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for CassandraResourcesUpdateCassandraKeyspaceThroughputFuture.Result. 
+func (future *CassandraResourcesUpdateCassandraKeyspaceThroughputFuture) result(client CassandraResourcesClient) (tsgr ThroughputSettingsGetResults, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.CassandraResourcesUpdateCassandraKeyspaceThroughputFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.CassandraResourcesUpdateCassandraKeyspaceThroughputFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if tsgr.Response.Response, err = future.GetResult(sender); err == nil && tsgr.Response.Response.StatusCode != http.StatusNoContent { + tsgr, err = client.UpdateCassandraKeyspaceThroughputResponder(tsgr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.CassandraResourcesUpdateCassandraKeyspaceThroughputFuture", "Result", tsgr.Response.Response, "Failure responding to request") + } + } + return +} + // CassandraResourcesUpdateCassandraTableThroughputFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type CassandraResourcesUpdateCassandraTableThroughputFuture struct { @@ -393,6 +535,39 @@ type CassandraResourcesUpdateCassandraTableThroughputFuture struct { Result func(CassandraResourcesClient) (ThroughputSettingsGetResults, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *CassandraResourcesUpdateCassandraTableThroughputFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for CassandraResourcesUpdateCassandraTableThroughputFuture.Result. +func (future *CassandraResourcesUpdateCassandraTableThroughputFuture) result(client CassandraResourcesClient) (tsgr ThroughputSettingsGetResults, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.CassandraResourcesUpdateCassandraTableThroughputFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.CassandraResourcesUpdateCassandraTableThroughputFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if tsgr.Response.Response, err = future.GetResult(sender); err == nil && tsgr.Response.Response.StatusCode != http.StatusNoContent { + tsgr, err = client.UpdateCassandraTableThroughputResponder(tsgr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.CassandraResourcesUpdateCassandraTableThroughputFuture", "Result", tsgr.Response.Response, "Failure responding to request") + } + } + return +} + // CassandraSchema cosmos DB Cassandra table schema type CassandraSchema struct { // Columns - List of Cassandra table columns. @@ -1112,6 +1287,39 @@ type DatabaseAccountsCreateOrUpdateFuture struct { Result func(DatabaseAccountsClient) (DatabaseAccountGetResults, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *DatabaseAccountsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DatabaseAccountsCreateOrUpdateFuture.Result. +func (future *DatabaseAccountsCreateOrUpdateFuture) result(client DatabaseAccountsClient) (dagr DatabaseAccountGetResults, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.DatabaseAccountsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if dagr.Response.Response, err = future.GetResult(sender); err == nil && dagr.Response.Response.StatusCode != http.StatusNoContent { + dagr, err = client.CreateOrUpdateResponder(dagr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsCreateOrUpdateFuture", "Result", dagr.Response.Response, "Failure responding to request") + } + } + return +} + // DatabaseAccountsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type DatabaseAccountsDeleteFuture struct { @@ -1121,6 +1329,33 @@ type DatabaseAccountsDeleteFuture struct { Result func(DatabaseAccountsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DatabaseAccountsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DatabaseAccountsDeleteFuture.Result. +func (future *DatabaseAccountsDeleteFuture) result(client DatabaseAccountsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.DatabaseAccountsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // DatabaseAccountsFailoverPriorityChangeFuture an abstraction for monitoring and retrieving the results of // a long-running operation. type DatabaseAccountsFailoverPriorityChangeFuture struct { @@ -1130,6 +1365,33 @@ type DatabaseAccountsFailoverPriorityChangeFuture struct { Result func(DatabaseAccountsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DatabaseAccountsFailoverPriorityChangeFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DatabaseAccountsFailoverPriorityChangeFuture.Result. 
+func (future *DatabaseAccountsFailoverPriorityChangeFuture) result(client DatabaseAccountsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsFailoverPriorityChangeFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.DatabaseAccountsFailoverPriorityChangeFuture") + return + } + ar.Response = future.Response() + return +} + // DatabaseAccountsListResult the List operation response, that contains the database accounts and their // properties. type DatabaseAccountsListResult struct { @@ -1147,6 +1409,33 @@ type DatabaseAccountsOfflineRegionFuture struct { Result func(DatabaseAccountsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DatabaseAccountsOfflineRegionFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DatabaseAccountsOfflineRegionFuture.Result. +func (future *DatabaseAccountsOfflineRegionFuture) result(client DatabaseAccountsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsOfflineRegionFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.DatabaseAccountsOfflineRegionFuture") + return + } + ar.Response = future.Response() + return +} + // DatabaseAccountsOnlineRegionFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type DatabaseAccountsOnlineRegionFuture struct { @@ -1156,6 +1445,33 @@ type DatabaseAccountsOnlineRegionFuture struct { Result func(DatabaseAccountsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DatabaseAccountsOnlineRegionFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DatabaseAccountsOnlineRegionFuture.Result. +func (future *DatabaseAccountsOnlineRegionFuture) result(client DatabaseAccountsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsOnlineRegionFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.DatabaseAccountsOnlineRegionFuture") + return + } + ar.Response = future.Response() + return +} + // DatabaseAccountsRegenerateKeyFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type DatabaseAccountsRegenerateKeyFuture struct { @@ -1165,6 +1481,33 @@ type DatabaseAccountsRegenerateKeyFuture struct { Result func(DatabaseAccountsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *DatabaseAccountsRegenerateKeyFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DatabaseAccountsRegenerateKeyFuture.Result. +func (future *DatabaseAccountsRegenerateKeyFuture) result(client DatabaseAccountsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsRegenerateKeyFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.DatabaseAccountsRegenerateKeyFuture") + return + } + ar.Response = future.Response() + return +} + // DatabaseAccountsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type DatabaseAccountsUpdateFuture struct { @@ -1174,6 +1517,39 @@ type DatabaseAccountsUpdateFuture struct { Result func(DatabaseAccountsClient) (DatabaseAccountGetResults, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DatabaseAccountsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DatabaseAccountsUpdateFuture.Result. +func (future *DatabaseAccountsUpdateFuture) result(client DatabaseAccountsClient) (dagr DatabaseAccountGetResults, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.DatabaseAccountsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if dagr.Response.Response, err = future.GetResult(sender); err == nil && dagr.Response.Response.StatusCode != http.StatusNoContent { + dagr, err = client.UpdateResponder(dagr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsUpdateFuture", "Result", dagr.Response.Response, "Failure responding to request") + } + } + return +} + // DatabaseAccountUpdateParameters parameters for patching Azure Cosmos DB database account properties. type DatabaseAccountUpdateParameters struct { Tags map[string]*string `json:"tags"` @@ -1879,6 +2255,39 @@ type GremlinResourcesCreateUpdateGremlinDatabaseFuture struct { Result func(GremlinResourcesClient) (GremlinDatabaseGetResults, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GremlinResourcesCreateUpdateGremlinDatabaseFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GremlinResourcesCreateUpdateGremlinDatabaseFuture.Result. 
+func (future *GremlinResourcesCreateUpdateGremlinDatabaseFuture) result(client GremlinResourcesClient) (gdgr GremlinDatabaseGetResults, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.GremlinResourcesCreateUpdateGremlinDatabaseFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.GremlinResourcesCreateUpdateGremlinDatabaseFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gdgr.Response.Response, err = future.GetResult(sender); err == nil && gdgr.Response.Response.StatusCode != http.StatusNoContent { + gdgr, err = client.CreateUpdateGremlinDatabaseResponder(gdgr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.GremlinResourcesCreateUpdateGremlinDatabaseFuture", "Result", gdgr.Response.Response, "Failure responding to request") + } + } + return +} + // GremlinResourcesCreateUpdateGremlinGraphFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type GremlinResourcesCreateUpdateGremlinGraphFuture struct { @@ -1888,6 +2297,39 @@ type GremlinResourcesCreateUpdateGremlinGraphFuture struct { Result func(GremlinResourcesClient) (GremlinGraphGetResults, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GremlinResourcesCreateUpdateGremlinGraphFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GremlinResourcesCreateUpdateGremlinGraphFuture.Result. +func (future *GremlinResourcesCreateUpdateGremlinGraphFuture) result(client GremlinResourcesClient) (gggr GremlinGraphGetResults, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.GremlinResourcesCreateUpdateGremlinGraphFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.GremlinResourcesCreateUpdateGremlinGraphFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gggr.Response.Response, err = future.GetResult(sender); err == nil && gggr.Response.Response.StatusCode != http.StatusNoContent { + gggr, err = client.CreateUpdateGremlinGraphResponder(gggr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.GremlinResourcesCreateUpdateGremlinGraphFuture", "Result", gggr.Response.Response, "Failure responding to request") + } + } + return +} + // GremlinResourcesDeleteGremlinDatabaseFuture an abstraction for monitoring and retrieving the results of // a long-running operation. type GremlinResourcesDeleteGremlinDatabaseFuture struct { @@ -1897,6 +2339,33 @@ type GremlinResourcesDeleteGremlinDatabaseFuture struct { Result func(GremlinResourcesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *GremlinResourcesDeleteGremlinDatabaseFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GremlinResourcesDeleteGremlinDatabaseFuture.Result. +func (future *GremlinResourcesDeleteGremlinDatabaseFuture) result(client GremlinResourcesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.GremlinResourcesDeleteGremlinDatabaseFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.GremlinResourcesDeleteGremlinDatabaseFuture") + return + } + ar.Response = future.Response() + return +} + // GremlinResourcesDeleteGremlinGraphFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type GremlinResourcesDeleteGremlinGraphFuture struct { @@ -1906,6 +2375,33 @@ type GremlinResourcesDeleteGremlinGraphFuture struct { Result func(GremlinResourcesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GremlinResourcesDeleteGremlinGraphFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GremlinResourcesDeleteGremlinGraphFuture.Result. +func (future *GremlinResourcesDeleteGremlinGraphFuture) result(client GremlinResourcesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.GremlinResourcesDeleteGremlinGraphFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.GremlinResourcesDeleteGremlinGraphFuture") + return + } + ar.Response = future.Response() + return +} + // GremlinResourcesUpdateGremlinDatabaseThroughputFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type GremlinResourcesUpdateGremlinDatabaseThroughputFuture struct { @@ -1915,6 +2411,39 @@ type GremlinResourcesUpdateGremlinDatabaseThroughputFuture struct { Result func(GremlinResourcesClient) (ThroughputSettingsGetResults, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GremlinResourcesUpdateGremlinDatabaseThroughputFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GremlinResourcesUpdateGremlinDatabaseThroughputFuture.Result. 
+func (future *GremlinResourcesUpdateGremlinDatabaseThroughputFuture) result(client GremlinResourcesClient) (tsgr ThroughputSettingsGetResults, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.GremlinResourcesUpdateGremlinDatabaseThroughputFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.GremlinResourcesUpdateGremlinDatabaseThroughputFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if tsgr.Response.Response, err = future.GetResult(sender); err == nil && tsgr.Response.Response.StatusCode != http.StatusNoContent { + tsgr, err = client.UpdateGremlinDatabaseThroughputResponder(tsgr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.GremlinResourcesUpdateGremlinDatabaseThroughputFuture", "Result", tsgr.Response.Response, "Failure responding to request") + } + } + return +} + // GremlinResourcesUpdateGremlinGraphThroughputFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type GremlinResourcesUpdateGremlinGraphThroughputFuture struct { @@ -1924,6 +2453,39 @@ type GremlinResourcesUpdateGremlinGraphThroughputFuture struct { Result func(GremlinResourcesClient) (ThroughputSettingsGetResults, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GremlinResourcesUpdateGremlinGraphThroughputFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GremlinResourcesUpdateGremlinGraphThroughputFuture.Result. +func (future *GremlinResourcesUpdateGremlinGraphThroughputFuture) result(client GremlinResourcesClient) (tsgr ThroughputSettingsGetResults, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.GremlinResourcesUpdateGremlinGraphThroughputFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.GremlinResourcesUpdateGremlinGraphThroughputFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if tsgr.Response.Response, err = future.GetResult(sender); err == nil && tsgr.Response.Response.StatusCode != http.StatusNoContent { + tsgr, err = client.UpdateGremlinGraphThroughputResponder(tsgr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.GremlinResourcesUpdateGremlinGraphThroughputFuture", "Result", tsgr.Response.Response, "Failure responding to request") + } + } + return +} + // IncludedPath the paths that are included in indexing type IncludedPath struct { // Path - The path for which the indexing behavior applies to. Index paths typically start with root and end with wildcard (/path/*) @@ -2642,6 +3204,39 @@ type MongoDBResourcesCreateUpdateMongoDBCollectionFuture struct { Result func(MongoDBResourcesClient) (MongoDBCollectionGetResults, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *MongoDBResourcesCreateUpdateMongoDBCollectionFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for MongoDBResourcesCreateUpdateMongoDBCollectionFuture.Result. +func (future *MongoDBResourcesCreateUpdateMongoDBCollectionFuture) result(client MongoDBResourcesClient) (mdcgr MongoDBCollectionGetResults, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.MongoDBResourcesCreateUpdateMongoDBCollectionFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.MongoDBResourcesCreateUpdateMongoDBCollectionFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if mdcgr.Response.Response, err = future.GetResult(sender); err == nil && mdcgr.Response.Response.StatusCode != http.StatusNoContent { + mdcgr, err = client.CreateUpdateMongoDBCollectionResponder(mdcgr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.MongoDBResourcesCreateUpdateMongoDBCollectionFuture", "Result", mdcgr.Response.Response, "Failure responding to request") + } + } + return +} + // MongoDBResourcesCreateUpdateMongoDBDatabaseFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type MongoDBResourcesCreateUpdateMongoDBDatabaseFuture struct { @@ -2651,6 +3246,39 @@ type MongoDBResourcesCreateUpdateMongoDBDatabaseFuture struct { Result func(MongoDBResourcesClient) (MongoDBDatabaseGetResults, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *MongoDBResourcesCreateUpdateMongoDBDatabaseFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for MongoDBResourcesCreateUpdateMongoDBDatabaseFuture.Result. 
+func (future *MongoDBResourcesCreateUpdateMongoDBDatabaseFuture) result(client MongoDBResourcesClient) (mddgr MongoDBDatabaseGetResults, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.MongoDBResourcesCreateUpdateMongoDBDatabaseFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.MongoDBResourcesCreateUpdateMongoDBDatabaseFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if mddgr.Response.Response, err = future.GetResult(sender); err == nil && mddgr.Response.Response.StatusCode != http.StatusNoContent { + mddgr, err = client.CreateUpdateMongoDBDatabaseResponder(mddgr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.MongoDBResourcesCreateUpdateMongoDBDatabaseFuture", "Result", mddgr.Response.Response, "Failure responding to request") + } + } + return +} + // MongoDBResourcesDeleteMongoDBCollectionFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type MongoDBResourcesDeleteMongoDBCollectionFuture struct { @@ -2660,13 +3288,67 @@ type MongoDBResourcesDeleteMongoDBCollectionFuture struct { Result func(MongoDBResourcesClient) (autorest.Response, error) } -// MongoDBResourcesDeleteMongoDBDatabaseFuture an abstraction for monitoring and retrieving the results of -// a long-running operation. -type MongoDBResourcesDeleteMongoDBDatabaseFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(MongoDBResourcesClient) (autorest.Response, error) +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *MongoDBResourcesDeleteMongoDBCollectionFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for MongoDBResourcesDeleteMongoDBCollectionFuture.Result. +func (future *MongoDBResourcesDeleteMongoDBCollectionFuture) result(client MongoDBResourcesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.MongoDBResourcesDeleteMongoDBCollectionFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.MongoDBResourcesDeleteMongoDBCollectionFuture") + return + } + ar.Response = future.Response() + return +} + +// MongoDBResourcesDeleteMongoDBDatabaseFuture an abstraction for monitoring and retrieving the results of +// a long-running operation. +type MongoDBResourcesDeleteMongoDBDatabaseFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(MongoDBResourcesClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *MongoDBResourcesDeleteMongoDBDatabaseFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for MongoDBResourcesDeleteMongoDBDatabaseFuture.Result. +func (future *MongoDBResourcesDeleteMongoDBDatabaseFuture) result(client MongoDBResourcesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.MongoDBResourcesDeleteMongoDBDatabaseFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.MongoDBResourcesDeleteMongoDBDatabaseFuture") + return + } + ar.Response = future.Response() + return } // MongoDBResourcesUpdateMongoDBCollectionThroughputFuture an abstraction for monitoring and retrieving the @@ -2678,6 +3360,39 @@ type MongoDBResourcesUpdateMongoDBCollectionThroughputFuture struct { Result func(MongoDBResourcesClient) (ThroughputSettingsGetResults, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *MongoDBResourcesUpdateMongoDBCollectionThroughputFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for MongoDBResourcesUpdateMongoDBCollectionThroughputFuture.Result. +func (future *MongoDBResourcesUpdateMongoDBCollectionThroughputFuture) result(client MongoDBResourcesClient) (tsgr ThroughputSettingsGetResults, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.MongoDBResourcesUpdateMongoDBCollectionThroughputFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.MongoDBResourcesUpdateMongoDBCollectionThroughputFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if tsgr.Response.Response, err = future.GetResult(sender); err == nil && tsgr.Response.Response.StatusCode != http.StatusNoContent { + tsgr, err = client.UpdateMongoDBCollectionThroughputResponder(tsgr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.MongoDBResourcesUpdateMongoDBCollectionThroughputFuture", "Result", tsgr.Response.Response, "Failure responding to request") + } + } + return +} + // MongoDBResourcesUpdateMongoDBDatabaseThroughputFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type MongoDBResourcesUpdateMongoDBDatabaseThroughputFuture struct { @@ -2687,6 +3402,39 @@ type MongoDBResourcesUpdateMongoDBDatabaseThroughputFuture struct { Result func(MongoDBResourcesClient) (ThroughputSettingsGetResults, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *MongoDBResourcesUpdateMongoDBDatabaseThroughputFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for MongoDBResourcesUpdateMongoDBDatabaseThroughputFuture.Result. +func (future *MongoDBResourcesUpdateMongoDBDatabaseThroughputFuture) result(client MongoDBResourcesClient) (tsgr ThroughputSettingsGetResults, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.MongoDBResourcesUpdateMongoDBDatabaseThroughputFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.MongoDBResourcesUpdateMongoDBDatabaseThroughputFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if tsgr.Response.Response, err = future.GetResult(sender); err == nil && tsgr.Response.Response.StatusCode != http.StatusNoContent { + tsgr, err = client.UpdateMongoDBDatabaseThroughputResponder(tsgr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.MongoDBResourcesUpdateMongoDBDatabaseThroughputFuture", "Result", tsgr.Response.Response, "Failure responding to request") + } + } + return +} + // MongoIndex cosmos DB MongoDB collection index key type MongoIndex struct { // Key - Cosmos DB MongoDB collection index keys @@ -2825,6 +3573,39 @@ type NotebookWorkspacesCreateOrUpdateFuture struct { Result func(NotebookWorkspacesClient) (NotebookWorkspace, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *NotebookWorkspacesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for NotebookWorkspacesCreateOrUpdateFuture.Result. +func (future *NotebookWorkspacesCreateOrUpdateFuture) result(client NotebookWorkspacesClient) (nw NotebookWorkspace, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.NotebookWorkspacesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.NotebookWorkspacesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if nw.Response.Response, err = future.GetResult(sender); err == nil && nw.Response.Response.StatusCode != http.StatusNoContent { + nw, err = client.CreateOrUpdateResponder(nw.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.NotebookWorkspacesCreateOrUpdateFuture", "Result", nw.Response.Response, "Failure responding to request") + } + } + return +} + // NotebookWorkspacesDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. 
type NotebookWorkspacesDeleteFuture struct { @@ -2834,6 +3615,33 @@ type NotebookWorkspacesDeleteFuture struct { Result func(NotebookWorkspacesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *NotebookWorkspacesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for NotebookWorkspacesDeleteFuture.Result. +func (future *NotebookWorkspacesDeleteFuture) result(client NotebookWorkspacesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.NotebookWorkspacesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.NotebookWorkspacesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // NotebookWorkspacesRegenerateAuthTokenFuture an abstraction for monitoring and retrieving the results of // a long-running operation. type NotebookWorkspacesRegenerateAuthTokenFuture struct { @@ -2843,6 +3651,33 @@ type NotebookWorkspacesRegenerateAuthTokenFuture struct { Result func(NotebookWorkspacesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *NotebookWorkspacesRegenerateAuthTokenFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for NotebookWorkspacesRegenerateAuthTokenFuture.Result. +func (future *NotebookWorkspacesRegenerateAuthTokenFuture) result(client NotebookWorkspacesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.NotebookWorkspacesRegenerateAuthTokenFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.NotebookWorkspacesRegenerateAuthTokenFuture") + return + } + ar.Response = future.Response() + return +} + // NotebookWorkspacesStartFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type NotebookWorkspacesStartFuture struct { @@ -2852,6 +3687,33 @@ type NotebookWorkspacesStartFuture struct { Result func(NotebookWorkspacesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *NotebookWorkspacesStartFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for NotebookWorkspacesStartFuture.Result. 
+func (future *NotebookWorkspacesStartFuture) result(client NotebookWorkspacesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.NotebookWorkspacesStartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.NotebookWorkspacesStartFuture") + return + } + ar.Response = future.Response() + return +} + // Operation REST API operation type Operation struct { // Name - Operation name: {provider}/{resource}/{operation} @@ -3748,6 +4610,39 @@ type SQLResourcesCreateUpdateSQLContainerFuture struct { Result func(SQLResourcesClient) (SQLContainerGetResults, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SQLResourcesCreateUpdateSQLContainerFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SQLResourcesCreateUpdateSQLContainerFuture.Result. +func (future *SQLResourcesCreateUpdateSQLContainerFuture) result(client SQLResourcesClient) (scgr SQLContainerGetResults, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesCreateUpdateSQLContainerFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.SQLResourcesCreateUpdateSQLContainerFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if scgr.Response.Response, err = future.GetResult(sender); err == nil && scgr.Response.Response.StatusCode != http.StatusNoContent { + scgr, err = client.CreateUpdateSQLContainerResponder(scgr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesCreateUpdateSQLContainerFuture", "Result", scgr.Response.Response, "Failure responding to request") + } + } + return +} + // SQLResourcesCreateUpdateSQLDatabaseFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type SQLResourcesCreateUpdateSQLDatabaseFuture struct { @@ -3757,6 +4652,39 @@ type SQLResourcesCreateUpdateSQLDatabaseFuture struct { Result func(SQLResourcesClient) (SQLDatabaseGetResults, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SQLResourcesCreateUpdateSQLDatabaseFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SQLResourcesCreateUpdateSQLDatabaseFuture.Result. 
+func (future *SQLResourcesCreateUpdateSQLDatabaseFuture) result(client SQLResourcesClient) (sdgr SQLDatabaseGetResults, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesCreateUpdateSQLDatabaseFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.SQLResourcesCreateUpdateSQLDatabaseFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if sdgr.Response.Response, err = future.GetResult(sender); err == nil && sdgr.Response.Response.StatusCode != http.StatusNoContent { + sdgr, err = client.CreateUpdateSQLDatabaseResponder(sdgr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesCreateUpdateSQLDatabaseFuture", "Result", sdgr.Response.Response, "Failure responding to request") + } + } + return +} + // SQLResourcesCreateUpdateSQLStoredProcedureFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type SQLResourcesCreateUpdateSQLStoredProcedureFuture struct { @@ -3766,6 +4694,39 @@ type SQLResourcesCreateUpdateSQLStoredProcedureFuture struct { Result func(SQLResourcesClient) (SQLStoredProcedureGetResults, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SQLResourcesCreateUpdateSQLStoredProcedureFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SQLResourcesCreateUpdateSQLStoredProcedureFuture.Result. +func (future *SQLResourcesCreateUpdateSQLStoredProcedureFuture) result(client SQLResourcesClient) (sspgr SQLStoredProcedureGetResults, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesCreateUpdateSQLStoredProcedureFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.SQLResourcesCreateUpdateSQLStoredProcedureFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if sspgr.Response.Response, err = future.GetResult(sender); err == nil && sspgr.Response.Response.StatusCode != http.StatusNoContent { + sspgr, err = client.CreateUpdateSQLStoredProcedureResponder(sspgr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesCreateUpdateSQLStoredProcedureFuture", "Result", sspgr.Response.Response, "Failure responding to request") + } + } + return +} + // SQLResourcesCreateUpdateSQLTriggerFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type SQLResourcesCreateUpdateSQLTriggerFuture struct { @@ -3775,6 +4736,39 @@ type SQLResourcesCreateUpdateSQLTriggerFuture struct { Result func(SQLResourcesClient) (SQLTriggerGetResults, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *SQLResourcesCreateUpdateSQLTriggerFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SQLResourcesCreateUpdateSQLTriggerFuture.Result. +func (future *SQLResourcesCreateUpdateSQLTriggerFuture) result(client SQLResourcesClient) (stgr SQLTriggerGetResults, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesCreateUpdateSQLTriggerFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.SQLResourcesCreateUpdateSQLTriggerFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if stgr.Response.Response, err = future.GetResult(sender); err == nil && stgr.Response.Response.StatusCode != http.StatusNoContent { + stgr, err = client.CreateUpdateSQLTriggerResponder(stgr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesCreateUpdateSQLTriggerFuture", "Result", stgr.Response.Response, "Failure responding to request") + } + } + return +} + // SQLResourcesCreateUpdateSQLUserDefinedFunctionFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type SQLResourcesCreateUpdateSQLUserDefinedFunctionFuture struct { @@ -3784,6 +4778,39 @@ type SQLResourcesCreateUpdateSQLUserDefinedFunctionFuture struct { Result func(SQLResourcesClient) (SQLUserDefinedFunctionGetResults, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SQLResourcesCreateUpdateSQLUserDefinedFunctionFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SQLResourcesCreateUpdateSQLUserDefinedFunctionFuture.Result. 
+func (future *SQLResourcesCreateUpdateSQLUserDefinedFunctionFuture) result(client SQLResourcesClient) (sudfgr SQLUserDefinedFunctionGetResults, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesCreateUpdateSQLUserDefinedFunctionFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.SQLResourcesCreateUpdateSQLUserDefinedFunctionFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if sudfgr.Response.Response, err = future.GetResult(sender); err == nil && sudfgr.Response.Response.StatusCode != http.StatusNoContent { + sudfgr, err = client.CreateUpdateSQLUserDefinedFunctionResponder(sudfgr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesCreateUpdateSQLUserDefinedFunctionFuture", "Result", sudfgr.Response.Response, "Failure responding to request") + } + } + return +} + // SQLResourcesDeleteSQLContainerFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type SQLResourcesDeleteSQLContainerFuture struct { @@ -3793,6 +4820,33 @@ type SQLResourcesDeleteSQLContainerFuture struct { Result func(SQLResourcesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SQLResourcesDeleteSQLContainerFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SQLResourcesDeleteSQLContainerFuture.Result. +func (future *SQLResourcesDeleteSQLContainerFuture) result(client SQLResourcesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesDeleteSQLContainerFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.SQLResourcesDeleteSQLContainerFuture") + return + } + ar.Response = future.Response() + return +} + // SQLResourcesDeleteSQLDatabaseFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type SQLResourcesDeleteSQLDatabaseFuture struct { @@ -3802,6 +4856,33 @@ type SQLResourcesDeleteSQLDatabaseFuture struct { Result func(SQLResourcesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SQLResourcesDeleteSQLDatabaseFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SQLResourcesDeleteSQLDatabaseFuture.Result. 
+func (future *SQLResourcesDeleteSQLDatabaseFuture) result(client SQLResourcesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesDeleteSQLDatabaseFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.SQLResourcesDeleteSQLDatabaseFuture") + return + } + ar.Response = future.Response() + return +} + // SQLResourcesDeleteSQLStoredProcedureFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type SQLResourcesDeleteSQLStoredProcedureFuture struct { @@ -3811,6 +4892,33 @@ type SQLResourcesDeleteSQLStoredProcedureFuture struct { Result func(SQLResourcesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SQLResourcesDeleteSQLStoredProcedureFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SQLResourcesDeleteSQLStoredProcedureFuture.Result. +func (future *SQLResourcesDeleteSQLStoredProcedureFuture) result(client SQLResourcesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesDeleteSQLStoredProcedureFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.SQLResourcesDeleteSQLStoredProcedureFuture") + return + } + ar.Response = future.Response() + return +} + // SQLResourcesDeleteSQLTriggerFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type SQLResourcesDeleteSQLTriggerFuture struct { @@ -3820,6 +4928,33 @@ type SQLResourcesDeleteSQLTriggerFuture struct { Result func(SQLResourcesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SQLResourcesDeleteSQLTriggerFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SQLResourcesDeleteSQLTriggerFuture.Result. +func (future *SQLResourcesDeleteSQLTriggerFuture) result(client SQLResourcesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesDeleteSQLTriggerFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.SQLResourcesDeleteSQLTriggerFuture") + return + } + ar.Response = future.Response() + return +} + // SQLResourcesDeleteSQLUserDefinedFunctionFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type SQLResourcesDeleteSQLUserDefinedFunctionFuture struct { @@ -3829,6 +4964,33 @@ type SQLResourcesDeleteSQLUserDefinedFunctionFuture struct { Result func(SQLResourcesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *SQLResourcesDeleteSQLUserDefinedFunctionFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SQLResourcesDeleteSQLUserDefinedFunctionFuture.Result. +func (future *SQLResourcesDeleteSQLUserDefinedFunctionFuture) result(client SQLResourcesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesDeleteSQLUserDefinedFunctionFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.SQLResourcesDeleteSQLUserDefinedFunctionFuture") + return + } + ar.Response = future.Response() + return +} + // SQLResourcesUpdateSQLContainerThroughputFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type SQLResourcesUpdateSQLContainerThroughputFuture struct { @@ -3838,6 +5000,39 @@ type SQLResourcesUpdateSQLContainerThroughputFuture struct { Result func(SQLResourcesClient) (ThroughputSettingsGetResults, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SQLResourcesUpdateSQLContainerThroughputFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SQLResourcesUpdateSQLContainerThroughputFuture.Result. +func (future *SQLResourcesUpdateSQLContainerThroughputFuture) result(client SQLResourcesClient) (tsgr ThroughputSettingsGetResults, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesUpdateSQLContainerThroughputFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.SQLResourcesUpdateSQLContainerThroughputFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if tsgr.Response.Response, err = future.GetResult(sender); err == nil && tsgr.Response.Response.StatusCode != http.StatusNoContent { + tsgr, err = client.UpdateSQLContainerThroughputResponder(tsgr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesUpdateSQLContainerThroughputFuture", "Result", tsgr.Response.Response, "Failure responding to request") + } + } + return +} + // SQLResourcesUpdateSQLDatabaseThroughputFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type SQLResourcesUpdateSQLDatabaseThroughputFuture struct { @@ -3847,6 +5042,39 @@ type SQLResourcesUpdateSQLDatabaseThroughputFuture struct { Result func(SQLResourcesClient) (ThroughputSettingsGetResults, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *SQLResourcesUpdateSQLDatabaseThroughputFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SQLResourcesUpdateSQLDatabaseThroughputFuture.Result. +func (future *SQLResourcesUpdateSQLDatabaseThroughputFuture) result(client SQLResourcesClient) (tsgr ThroughputSettingsGetResults, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesUpdateSQLDatabaseThroughputFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.SQLResourcesUpdateSQLDatabaseThroughputFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if tsgr.Response.Response, err = future.GetResult(sender); err == nil && tsgr.Response.Response.StatusCode != http.StatusNoContent { + tsgr, err = client.UpdateSQLDatabaseThroughputResponder(tsgr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesUpdateSQLDatabaseThroughputFuture", "Result", tsgr.Response.Response, "Failure responding to request") + } + } + return +} + // SQLStoredProcedureCreateUpdateParameters parameters to create and update Cosmos DB storedProcedure. type SQLStoredProcedureCreateUpdateParameters struct { // SQLStoredProcedureCreateUpdateProperties - Properties to create and update Azure Cosmos DB storedProcedure. @@ -4928,6 +6156,39 @@ type TableResourcesCreateUpdateTableFuture struct { Result func(TableResourcesClient) (TableGetResults, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *TableResourcesCreateUpdateTableFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for TableResourcesCreateUpdateTableFuture.Result. +func (future *TableResourcesCreateUpdateTableFuture) result(client TableResourcesClient) (tgr TableGetResults, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.TableResourcesCreateUpdateTableFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.TableResourcesCreateUpdateTableFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if tgr.Response.Response, err = future.GetResult(sender); err == nil && tgr.Response.Response.StatusCode != http.StatusNoContent { + tgr, err = client.CreateUpdateTableResponder(tgr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.TableResourcesCreateUpdateTableFuture", "Result", tgr.Response.Response, "Failure responding to request") + } + } + return +} + // TableResourcesDeleteTableFuture an abstraction for monitoring and retrieving the results of a // long-running operation. 
type TableResourcesDeleteTableFuture struct { @@ -4937,6 +6198,33 @@ type TableResourcesDeleteTableFuture struct { Result func(TableResourcesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *TableResourcesDeleteTableFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for TableResourcesDeleteTableFuture.Result. +func (future *TableResourcesDeleteTableFuture) result(client TableResourcesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.TableResourcesDeleteTableFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.TableResourcesDeleteTableFuture") + return + } + ar.Response = future.Response() + return +} + // TableResourcesUpdateTableThroughputFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type TableResourcesUpdateTableThroughputFuture struct { @@ -4946,6 +6234,39 @@ type TableResourcesUpdateTableThroughputFuture struct { Result func(TableResourcesClient) (ThroughputSettingsGetResults, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *TableResourcesUpdateTableThroughputFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for TableResourcesUpdateTableThroughputFuture.Result. 
+func (future *TableResourcesUpdateTableThroughputFuture) result(client TableResourcesClient) (tsgr ThroughputSettingsGetResults, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.TableResourcesUpdateTableThroughputFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("documentdb.TableResourcesUpdateTableThroughputFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if tsgr.Response.Response, err = future.GetResult(sender); err == nil && tsgr.Response.Response.StatusCode != http.StatusNoContent { + tsgr, err = client.UpdateTableThroughputResponder(tsgr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "documentdb.TableResourcesUpdateTableThroughputFuture", "Result", tsgr.Response.Response, "Failure responding to request") + } + } + return +} + // ThroughputSettingsGetProperties the properties of an Azure Cosmos DB resource throughput type ThroughputSettingsGetProperties struct { Resource *ThroughputSettingsGetPropertiesResource `json:"resource,omitempty"` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/mongodbresources.go b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/mongodbresources.go index bb969cd490e..28dc3678344 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/mongodbresources.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/mongodbresources.go @@ -1,18 +1,7 @@ package documentdb -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -132,30 +121,7 @@ func (client MongoDBResourcesClient) CreateUpdateMongoDBCollectionSender(req *ht var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client MongoDBResourcesClient) (mdcgr MongoDBCollectionGetResults, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.MongoDBResourcesCreateUpdateMongoDBCollectionFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.MongoDBResourcesCreateUpdateMongoDBCollectionFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - mdcgr.Response.Response, err = future.GetResult(sender) - if mdcgr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "documentdb.MongoDBResourcesCreateUpdateMongoDBCollectionFuture", "Result", nil, "received nil response and error") - } - if err == nil && mdcgr.Response.Response.StatusCode != http.StatusNoContent { - mdcgr, err = client.CreateUpdateMongoDBCollectionResponder(mdcgr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.MongoDBResourcesCreateUpdateMongoDBCollectionFuture", "Result", mdcgr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -258,30 +224,7 @@ func (client MongoDBResourcesClient) CreateUpdateMongoDBDatabaseSender(req *http var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client MongoDBResourcesClient) (mddgr MongoDBDatabaseGetResults, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.MongoDBResourcesCreateUpdateMongoDBDatabaseFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.MongoDBResourcesCreateUpdateMongoDBDatabaseFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - mddgr.Response.Response, err = future.GetResult(sender) - if mddgr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "documentdb.MongoDBResourcesCreateUpdateMongoDBDatabaseFuture", "Result", nil, "received nil response and error") - } - if err == nil && mddgr.Response.Response.StatusCode != http.StatusNoContent { - mddgr, err = client.CreateUpdateMongoDBDatabaseResponder(mddgr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.MongoDBResourcesCreateUpdateMongoDBDatabaseFuture", "Result", mddgr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -377,20 +320,7 @@ func (client MongoDBResourcesClient) DeleteMongoDBCollectionSender(req *http.Req var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client MongoDBResourcesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.MongoDBResourcesDeleteMongoDBCollectionFuture", 
"Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.MongoDBResourcesDeleteMongoDBCollectionFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -483,20 +413,7 @@ func (client MongoDBResourcesClient) DeleteMongoDBDatabaseSender(req *http.Reque var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client MongoDBResourcesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.MongoDBResourcesDeleteMongoDBDatabaseFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.MongoDBResourcesDeleteMongoDBDatabaseFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1157,30 +1074,7 @@ func (client MongoDBResourcesClient) UpdateMongoDBCollectionThroughputSender(req var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client MongoDBResourcesClient) (tsgr ThroughputSettingsGetResults, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.MongoDBResourcesUpdateMongoDBCollectionThroughputFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.MongoDBResourcesUpdateMongoDBCollectionThroughputFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - tsgr.Response.Response, err = future.GetResult(sender) - if tsgr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "documentdb.MongoDBResourcesUpdateMongoDBCollectionThroughputFuture", "Result", nil, "received nil response and error") - } - if err == nil && tsgr.Response.Response.StatusCode != http.StatusNoContent { - tsgr, err = client.UpdateMongoDBCollectionThroughputResponder(tsgr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.MongoDBResourcesUpdateMongoDBCollectionThroughputFuture", "Result", tsgr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1283,30 +1177,7 @@ func (client MongoDBResourcesClient) UpdateMongoDBDatabaseThroughputSender(req * var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client MongoDBResourcesClient) (tsgr ThroughputSettingsGetResults, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.MongoDBResourcesUpdateMongoDBDatabaseThroughputFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.MongoDBResourcesUpdateMongoDBDatabaseThroughputFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - tsgr.Response.Response, err = future.GetResult(sender) - if tsgr.Response.Response == nil && err == nil { - err = 
autorest.NewErrorWithError(err, "documentdb.MongoDBResourcesUpdateMongoDBDatabaseThroughputFuture", "Result", nil, "received nil response and error") - } - if err == nil && tsgr.Response.Response.StatusCode != http.StatusNoContent { - tsgr, err = client.UpdateMongoDBDatabaseThroughputResponder(tsgr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.MongoDBResourcesUpdateMongoDBDatabaseThroughputFuture", "Result", tsgr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/notebookworkspaces.go b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/notebookworkspaces.go index 3d649d8c719..9d33c18dcda 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/notebookworkspaces.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/notebookworkspaces.go @@ -1,18 +1,7 @@ package documentdb -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -123,30 +112,7 @@ func (client NotebookWorkspacesClient) CreateOrUpdateSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client NotebookWorkspacesClient) (nw NotebookWorkspace, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.NotebookWorkspacesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.NotebookWorkspacesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - nw.Response.Response, err = future.GetResult(sender) - if nw.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "documentdb.NotebookWorkspacesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && nw.Response.Response.StatusCode != http.StatusNoContent { - nw, err = client.CreateOrUpdateResponder(nw.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.NotebookWorkspacesCreateOrUpdateFuture", "Result", nw.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -239,20 +205,7 @@ func (client NotebookWorkspacesClient) DeleteSender(req *http.Request) (future N var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client NotebookWorkspacesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.NotebookWorkspacesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.NotebookWorkspacesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -616,20 +569,7 @@ func (client NotebookWorkspacesClient) RegenerateAuthTokenSender(req *http.Reque var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client NotebookWorkspacesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.NotebookWorkspacesRegenerateAuthTokenFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.NotebookWorkspacesRegenerateAuthTokenFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -721,20 +661,7 @@ func (client NotebookWorkspacesClient) StartSender(req *http.Request) (future No var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client NotebookWorkspacesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.NotebookWorkspacesStartFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.NotebookWorkspacesStartFuture") 
- return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/operations.go index 887e0f32d94..cc810d625e8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/operations.go @@ -1,18 +1,7 @@ package documentdb -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/partitionkeyrangeid.go b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/partitionkeyrangeid.go index 85207bbf6de..792b435ed63 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/partitionkeyrangeid.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/partitionkeyrangeid.go @@ -1,18 +1,7 @@ package documentdb -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
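For callers of these clients the refactor is behavior-preserving: a long-running call still returns a future, which is waited on and then resolved through `Result`. A minimal usage sketch under assumed names (the function name, context, and parameter values are placeholders, not taken from this diff):

// Sketch, assuming the "context" and documentdb imports and an initialized
// documentdb.MongoDBResourcesClient.
func deleteMongoDatabase(ctx context.Context, client documentdb.MongoDBResourcesClient, resourceGroupName, accountName, databaseName string) error {
	future, err := client.DeleteMongoDBDatabase(ctx, resourceGroupName, accountName, databaseName)
	if err != nil {
		return err
	}
	// Block until the long-running operation completes.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return err
	}
	// Result is now backed by the generated result method instead of an inline closure.
	_, err = future.Result(client)
	return err
}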
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/partitionkeyrangeidregion.go b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/partitionkeyrangeidregion.go index 7001f91d8b8..7e5c0ca624f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/partitionkeyrangeidregion.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/partitionkeyrangeidregion.go @@ -1,18 +1,7 @@ package documentdb -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/percentile.go b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/percentile.go index 0e040826265..5005ca8ad03 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/percentile.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/percentile.go @@ -1,18 +1,7 @@ package documentdb -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/percentilesourcetarget.go b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/percentilesourcetarget.go index 279eb033f89..34b9ab24b7f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/percentilesourcetarget.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/percentilesourcetarget.go @@ -1,18 +1,7 @@ package documentdb -// Copyright (c) Microsoft and contributors. 
All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/percentiletarget.go b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/percentiletarget.go index 0853bf8fb67..8b52720e7ae 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/percentiletarget.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/percentiletarget.go @@ -1,18 +1,7 @@ package documentdb -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/sqlresources.go b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/sqlresources.go index d0080c92f46..e2f407858ee 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/sqlresources.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/sqlresources.go @@ -1,18 +1,7 @@ package documentdb -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -138,30 +127,7 @@ func (client SQLResourcesClient) CreateUpdateSQLContainerSender(req *http.Reques var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SQLResourcesClient) (scgr SQLContainerGetResults, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesCreateUpdateSQLContainerFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.SQLResourcesCreateUpdateSQLContainerFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - scgr.Response.Response, err = future.GetResult(sender) - if scgr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesCreateUpdateSQLContainerFuture", "Result", nil, "received nil response and error") - } - if err == nil && scgr.Response.Response.StatusCode != http.StatusNoContent { - scgr, err = client.CreateUpdateSQLContainerResponder(scgr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesCreateUpdateSQLContainerFuture", "Result", scgr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -264,30 +230,7 @@ func (client SQLResourcesClient) CreateUpdateSQLDatabaseSender(req *http.Request var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SQLResourcesClient) (sdgr SQLDatabaseGetResults, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesCreateUpdateSQLDatabaseFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.SQLResourcesCreateUpdateSQLDatabaseFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - sdgr.Response.Response, err = future.GetResult(sender) - if sdgr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesCreateUpdateSQLDatabaseFuture", "Result", nil, "received nil response and error") - } - if err == nil && sdgr.Response.Response.StatusCode != http.StatusNoContent { - sdgr, err = client.CreateUpdateSQLDatabaseResponder(sdgr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesCreateUpdateSQLDatabaseFuture", "Result", sdgr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -394,30 +337,7 @@ func (client SQLResourcesClient) CreateUpdateSQLStoredProcedureSender(req *http. 
var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SQLResourcesClient) (sspgr SQLStoredProcedureGetResults, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesCreateUpdateSQLStoredProcedureFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.SQLResourcesCreateUpdateSQLStoredProcedureFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - sspgr.Response.Response, err = future.GetResult(sender) - if sspgr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesCreateUpdateSQLStoredProcedureFuture", "Result", nil, "received nil response and error") - } - if err == nil && sspgr.Response.Response.StatusCode != http.StatusNoContent { - sspgr, err = client.CreateUpdateSQLStoredProcedureResponder(sspgr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesCreateUpdateSQLStoredProcedureFuture", "Result", sspgr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -524,30 +444,7 @@ func (client SQLResourcesClient) CreateUpdateSQLTriggerSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SQLResourcesClient) (stgr SQLTriggerGetResults, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesCreateUpdateSQLTriggerFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.SQLResourcesCreateUpdateSQLTriggerFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - stgr.Response.Response, err = future.GetResult(sender) - if stgr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesCreateUpdateSQLTriggerFuture", "Result", nil, "received nil response and error") - } - if err == nil && stgr.Response.Response.StatusCode != http.StatusNoContent { - stgr, err = client.CreateUpdateSQLTriggerResponder(stgr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesCreateUpdateSQLTriggerFuture", "Result", stgr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -655,30 +552,7 @@ func (client SQLResourcesClient) CreateUpdateSQLUserDefinedFunctionSender(req *h var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SQLResourcesClient) (sudfgr SQLUserDefinedFunctionGetResults, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesCreateUpdateSQLUserDefinedFunctionFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = 
azure.NewAsyncOpIncompleteError("documentdb.SQLResourcesCreateUpdateSQLUserDefinedFunctionFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - sudfgr.Response.Response, err = future.GetResult(sender) - if sudfgr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesCreateUpdateSQLUserDefinedFunctionFuture", "Result", nil, "received nil response and error") - } - if err == nil && sudfgr.Response.Response.StatusCode != http.StatusNoContent { - sudfgr, err = client.CreateUpdateSQLUserDefinedFunctionResponder(sudfgr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesCreateUpdateSQLUserDefinedFunctionFuture", "Result", sudfgr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -774,20 +648,7 @@ func (client SQLResourcesClient) DeleteSQLContainerSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SQLResourcesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesDeleteSQLContainerFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.SQLResourcesDeleteSQLContainerFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -880,20 +741,7 @@ func (client SQLResourcesClient) DeleteSQLDatabaseSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SQLResourcesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesDeleteSQLDatabaseFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.SQLResourcesDeleteSQLDatabaseFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -990,20 +838,7 @@ func (client SQLResourcesClient) DeleteSQLStoredProcedureSender(req *http.Reques var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SQLResourcesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesDeleteSQLStoredProcedureFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.SQLResourcesDeleteSQLStoredProcedureFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1100,20 +935,7 @@ func (client SQLResourcesClient) DeleteSQLTriggerSender(req *http.Request) (futu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SQLResourcesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if 
err != nil { - err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesDeleteSQLTriggerFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.SQLResourcesDeleteSQLTriggerFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1210,20 +1032,7 @@ func (client SQLResourcesClient) DeleteSQLUserDefinedFunctionSender(req *http.Re var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SQLResourcesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesDeleteSQLUserDefinedFunctionFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.SQLResourcesDeleteSQLUserDefinedFunctionFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -2452,30 +2261,7 @@ func (client SQLResourcesClient) UpdateSQLContainerThroughputSender(req *http.Re var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SQLResourcesClient) (tsgr ThroughputSettingsGetResults, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesUpdateSQLContainerThroughputFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.SQLResourcesUpdateSQLContainerThroughputFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - tsgr.Response.Response, err = future.GetResult(sender) - if tsgr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesUpdateSQLContainerThroughputFuture", "Result", nil, "received nil response and error") - } - if err == nil && tsgr.Response.Response.StatusCode != http.StatusNoContent { - tsgr, err = client.UpdateSQLContainerThroughputResponder(tsgr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesUpdateSQLContainerThroughputFuture", "Result", tsgr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -2577,30 +2363,7 @@ func (client SQLResourcesClient) UpdateSQLDatabaseThroughputSender(req *http.Req var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SQLResourcesClient) (tsgr ThroughputSettingsGetResults, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesUpdateSQLDatabaseThroughputFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.SQLResourcesUpdateSQLDatabaseThroughputFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - tsgr.Response.Response, err = future.GetResult(sender) - if tsgr.Response.Response == 
nil && err == nil { - err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesUpdateSQLDatabaseThroughputFuture", "Result", nil, "received nil response and error") - } - if err == nil && tsgr.Response.Response.StatusCode != http.StatusNoContent { - tsgr, err = client.UpdateSQLDatabaseThroughputResponder(tsgr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.SQLResourcesUpdateSQLDatabaseThroughputFuture", "Result", tsgr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/tableresources.go b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/tableresources.go index 0f9582dfe05..62068fd0473 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/tableresources.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/tableresources.go @@ -1,18 +1,7 @@ package documentdb -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -129,30 +118,7 @@ func (client TableResourcesClient) CreateUpdateTableSender(req *http.Request) (f var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client TableResourcesClient) (tgr TableGetResults, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.TableResourcesCreateUpdateTableFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.TableResourcesCreateUpdateTableFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - tgr.Response.Response, err = future.GetResult(sender) - if tgr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "documentdb.TableResourcesCreateUpdateTableFuture", "Result", nil, "received nil response and error") - } - if err == nil && tgr.Response.Response.StatusCode != http.StatusNoContent { - tgr, err = client.CreateUpdateTableResponder(tgr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.TableResourcesCreateUpdateTableFuture", "Result", tgr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -246,20 +212,7 @@ func (client TableResourcesClient) DeleteTableSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client TableResourcesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.TableResourcesDeleteTableFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.TableResourcesDeleteTableFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -635,30 +588,7 @@ func (client TableResourcesClient) UpdateTableThroughputSender(req *http.Request var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client TableResourcesClient) (tsgr ThroughputSettingsGetResults, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.TableResourcesUpdateTableThroughputFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("documentdb.TableResourcesUpdateTableThroughputFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - tsgr.Response.Response, err = future.GetResult(sender) - if tsgr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "documentdb.TableResourcesUpdateTableThroughputFuture", "Result", nil, "received nil response and error") - } - if err == nil && tsgr.Response.Response.StatusCode != http.StatusNoContent { - tsgr, err = client.UpdateTableThroughputResponder(tsgr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "documentdb.TableResourcesUpdateTableThroughputFuture", "Result", 
tsgr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/version.go index 78d8309936e..e0c7ca228ab 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2019-08-01/documentdb/version.go @@ -2,19 +2,8 @@ package documentdb import "github.com/Azure/azure-sdk-for-go/version" -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/CHANGELOG.md index ca7f7ec433b..d5139164274 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/CHANGELOG.md @@ -1,5 +1,8 @@ Generated from https://github.com/Azure/azure-rest-api-specs/tree/3c764635e7d442b3e74caf593029fcd440b3ef82//specification/dns/resource-manager/readme.md tag: `package-2018-05` -Code generator @microsoft.azure/autorest.go@2.1.175 +Code generator @microsoft.azure/autorest.go@2.1.178 +### New Funcs + +1. 
*ZonesDeleteFuture.UnmarshalJSON([]byte) error diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/_meta.json new file mode 100644 index 00000000000..1e83ad5caa1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/_meta.json @@ -0,0 +1,10 @@ +{ + "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", + "readme": "/_/azure-rest-api-specs/specification/dns/resource-manager/readme.md", + "tag": "package-2018-05", + "use": "@microsoft.azure/autorest.go@2.1.178", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.178 --go --verbose --go-sdk-folder=/_/azure-sdk-for-go --tag=package-2018-05 --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/dns/resource-manager/readme.md", + "additional_properties": { + "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION" + } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/client.go index fb721c511bd..2acecfc15c6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/client.go @@ -3,19 +3,8 @@ // The DNS Management Client. package dns -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/enums.go index b5ab94cc3e6..90319affc9b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/enums.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/enums.go @@ -1,18 +1,7 @@ package dns -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. 
+// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/models.go index 8f1f0dfb8b0..3a466f909f2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/models.go @@ -1,18 +1,7 @@ package dns -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -928,6 +917,33 @@ type ZonesDeleteFuture struct { Result func(ZonesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ZonesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ZonesDeleteFuture.Result. +func (future *ZonesDeleteFuture) result(client ZonesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("dns.ZonesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ZoneUpdate describes a request to update a DNS zone. type ZoneUpdate struct { // Tags - Resource tags. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/recordsets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/recordsets.go index 798792aa325..d3aa10e26d2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/recordsets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/recordsets.go @@ -1,18 +1,7 @@ package dns -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/resourcereference.go b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/resourcereference.go index 2523d4376dd..875735853f3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/resourcereference.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/resourcereference.go @@ -1,18 +1,7 @@ package dns -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/version.go index a5924f426e6..940b45d22f9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/version.go @@ -2,19 +2,8 @@ package dns import "github.com/Azure/azure-sdk-for-go/version" -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. 
// Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/zones.go b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/zones.go index 68060419173..15e42c17b48 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/zones.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/zones.go @@ -1,18 +1,7 @@ package dns -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -201,20 +190,7 @@ func (client ZonesClient) DeleteSender(req *http.Request) (future ZonesDeleteFut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ZonesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "dns.ZonesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("dns.ZonesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/CHANGELOG.md index 6ce074d0931..465731bfdfc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/CHANGELOG.md @@ -1,5 +1,5 @@ Generated from https://github.com/Azure/azure-rest-api-specs/tree/3c764635e7d442b3e74caf593029fcd440b3ef82//specification/graphrbac/data-plane/readme.md tag: `1.6` -Code generator @microsoft.azure/autorest.go@2.1.175 +Code generator @microsoft.azure/autorest.go@2.1.178 diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/_meta.json new file mode 100644 index 00000000000..b4ed087169e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/_meta.json @@ -0,0 +1,10 @@ +{ + "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", + "readme": "/_/azure-rest-api-specs/specification/graphrbac/data-plane/readme.md", + "tag": "1.6", + "use": "@microsoft.azure/autorest.go@2.1.178", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.178 --go --verbose --go-sdk-folder=/_/azure-sdk-for-go --tag=1.6 --use-onever 
--version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/graphrbac/data-plane/readme.md", + "additional_properties": { + "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION" + } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/applications.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/applications.go index d32f8e0e4e2..1409be13a82 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/applications.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/applications.go @@ -1,18 +1,7 @@ package graphrbac -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/client.go index e22971a0fc5..8cecf6457ab 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/client.go @@ -3,19 +3,8 @@ // The Graph RBAC Management Client package graphrbac -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
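The dns models.go hunk above also shows why the closures had to become named methods: each future type now carries a custom `UnmarshalJSON` that re-attaches `Result` after the future has been rebuilt from JSON, something an anonymous closure created inside a `*Sender` call could not survive. A hedged sketch of rehydrating a persisted future under the new pattern (where the stored bytes come from is outside this diff):

// Sketch, assuming data holds the JSON of a previously persisted dns.ZonesDeleteFuture
// and zonesClient is an initialized dns.ZonesClient.
var future dns.ZonesDeleteFuture
if err := json.Unmarshal(data, &future); err != nil {
	return err
}
// The custom UnmarshalJSON has wired future.Result to the generated result method,
// so the rehydrated future can be resolved as usual.
ar, err := future.Result(zonesClient)
if err != nil {
	return err
}
// ar.Response carries the terminal HTTP response of the delete operation.
_ = ar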
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/deletedapplications.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/deletedapplications.go index 5aa143f43bc..dfb5eda6de1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/deletedapplications.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/deletedapplications.go @@ -1,18 +1,7 @@ package graphrbac -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/domains.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/domains.go index 55e97b20ee3..4c1291988b3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/domains.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/domains.go @@ -1,18 +1,7 @@ package graphrbac -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/enums.go index 2e9cbd70f6a..bc867cafb98 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/enums.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/enums.go @@ -1,18 +1,7 @@ package graphrbac -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/groups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/groups.go index d03739fb3af..7d8e52982f3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/groups.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/groups.go @@ -1,18 +1,7 @@ package graphrbac -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/models.go index 6ff58bf7bac..2bbac36f178 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/models.go @@ -1,18 +1,7 @@ package graphrbac -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/oauth2permissiongrant.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/oauth2permissiongrant.go index 74618ef492b..30b7d9f640b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/oauth2permissiongrant.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/oauth2permissiongrant.go @@ -1,18 +1,7 @@ package graphrbac -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/objects.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/objects.go index b4b18246108..c4b78e94b34 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/objects.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/objects.go @@ -1,18 +1,7 @@ package graphrbac -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/serviceprincipals.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/serviceprincipals.go index 4e9d43405aa..76920172d85 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/serviceprincipals.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/serviceprincipals.go @@ -1,18 +1,7 @@ package graphrbac -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/signedinuser.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/signedinuser.go index 0f805b5165f..312d554de74 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/signedinuser.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/signedinuser.go @@ -1,18 +1,7 @@ package graphrbac -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/users.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/users.go index b9a7a56c3e8..42afe2645b7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/users.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/users.go @@ -1,18 +1,7 @@ package graphrbac -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/version.go index b0d5c79fd7c..b58234fc4e1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/version.go @@ -2,19 +2,8 @@ package graphrbac import "github.com/Azure/azure-sdk-for-go/version" -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/CHANGELOG.md index 924e857cdb6..18af4d70e72 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/CHANGELOG.md @@ -1,5 +1,8 @@ Generated from https://github.com/Azure/azure-rest-api-specs/tree/3c764635e7d442b3e74caf593029fcd440b3ef82//specification/keyvault/resource-manager/readme.md tag: `package-2016-10` -Code generator @microsoft.azure/autorest.go@2.1.175 +Code generator @microsoft.azure/autorest.go@2.1.178 +### New Funcs + +1. 
*VaultsPurgeDeletedFuture.UnmarshalJSON([]byte) error diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/_meta.json new file mode 100644 index 00000000000..7e1bce77fd6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/_meta.json @@ -0,0 +1,10 @@ +{ + "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", + "readme": "/_/azure-rest-api-specs/specification/keyvault/resource-manager/readme.md", + "tag": "package-2016-10", + "use": "@microsoft.azure/autorest.go@2.1.178", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.178 --go --verbose --go-sdk-folder=/_/azure-sdk-for-go --tag=package-2016-10 --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/keyvault/resource-manager/readme.md", + "additional_properties": { + "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION" + } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/client.go index 16dd84463b1..525e9d9e54c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/client.go @@ -3,19 +3,8 @@ // The Azure management API provides a RESTful set of web services that interact with Azure Key Vault. package keyvault -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/enums.go index f07de23e6be..2727a898680 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/enums.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/enums.go @@ -1,18 +1,7 @@ package keyvault -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/models.go index c0a89a986a4..bc31cad0ce6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/models.go @@ -1,18 +1,7 @@ package keyvault -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -1065,3 +1054,30 @@ type VaultsPurgeDeletedFuture struct { // If the operation has not completed it will return an error. Result func(VaultsClient) (autorest.Response, error) } + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VaultsPurgeDeletedFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VaultsPurgeDeletedFuture.Result. 
+func (future *VaultsPurgeDeletedFuture) result(client VaultsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.VaultsPurgeDeletedFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("keyvault.VaultsPurgeDeletedFuture") + return + } + ar.Response = future.Response() + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/operations.go index 3b515c40ea2..67bb31cc86e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/operations.go @@ -1,18 +1,7 @@ package keyvault -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/vaults.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/vaults.go index 906e70fc9f2..3158589c596 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/vaults.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/vaults.go @@ -1,18 +1,7 @@ package keyvault -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -977,20 +966,7 @@ func (client VaultsClient) PurgeDeletedSender(req *http.Request) (future VaultsP var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VaultsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.VaultsPurgeDeletedFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("keyvault.VaultsPurgeDeletedFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/version.go index b63082f340a..f36b49b93d8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault/version.go @@ -2,19 +2,8 @@ package keyvault import "github.com/Azure/azure-sdk-for-go/version" -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
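Editorial aside (not part of the vendored diff): the keyvault hunks above show the pattern autorest.go 2.1.178 applies to every long-running-operation future in this update. The inline `future.Result = func(...)` closure in the `*Sender` method is replaced by a generated `result` method, and a typed `UnmarshalJSON` is added so that a future rehydrated from its JSON polling state regains a working `Result` function. The sketch below is a minimal, hypothetical illustration of how a caller could persist and later resume `VaultsPurgeDeletedFuture` using that new unmarshaller; the subscription ID, vault name, and location are placeholders, environment-based auth is assumed, and error handling is abbreviated.

```go
package main

import (
	"context"
	"encoding/json"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2016-10-01/keyvault"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Assumes the usual AZURE_* environment variables are set.
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	client := keyvault.NewVaultsClient("<subscriptionID>") // placeholder
	client.Authorizer = authorizer

	ctx := context.Background()

	// Start the long-running purge; the returned future wraps azure.FutureAPI.
	future, err := client.PurgeDeleted(ctx, "<vaultName>", "<location>") // placeholders
	if err != nil {
		log.Fatal(err)
	}

	// Persist the polling state (e.g. in a database) by marshalling the
	// embedded azure.Future.
	state, err := json.Marshal(future.FutureAPI)
	if err != nil {
		log.Fatal(err)
	}

	// Later, possibly in another process, rehydrate the future. The generated
	// UnmarshalJSON shown in the diff restores both the polling state and the
	// Result function (future.Result = future.result).
	var resumed keyvault.VaultsPurgeDeletedFuture
	if err := json.Unmarshal(state, &resumed); err != nil {
		log.Fatal(err)
	}
	if err := resumed.WaitForCompletionRef(ctx, client.Client); err != nil {
		log.Fatal(err)
	}
	if _, err := resumed.Result(client); err != nil {
		log.Fatal(err)
	}
}
```

The same rehydration pattern applies to every other future regenerated in this diff (the msi and network packages below); only the client and result types differ.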
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/CHANGELOG.md index 78b3f993b2e..fe9528caf7c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/CHANGELOG.md @@ -1,5 +1,5 @@ Generated from https://github.com/Azure/azure-rest-api-specs/tree/3c764635e7d442b3e74caf593029fcd440b3ef82//specification/keyvault/data-plane/readme.md tag: `package-7.0` -Code generator @microsoft.azure/autorest.go@2.1.175 +Code generator @microsoft.azure/autorest.go@2.1.178 diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/_meta.json new file mode 100644 index 00000000000..06d51a27ece --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/_meta.json @@ -0,0 +1,10 @@ +{ + "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", + "readme": "/_/azure-rest-api-specs/specification/keyvault/data-plane/readme.md", + "tag": "package-7.0", + "use": "@microsoft.azure/autorest.go@2.1.178", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.178 --go --verbose --go-sdk-folder=/_/azure-sdk-for-go --tag=package-7.0 --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/keyvault/data-plane/readme.md", + "additional_properties": { + "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION" + } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/client.go index 819b1046dd0..c5589a020b4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/client.go @@ -3,19 +3,8 @@ // The key vault client performs cryptographic key operations and vault operations against the Key Vault service. package keyvault -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/enums.go index fbc94bc147b..7733a009aff 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/enums.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/enums.go @@ -1,18 +1,7 @@ package keyvault -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/models.go index cb4f46bb428..8016d7f04d2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/models.go @@ -1,18 +1,7 @@ package keyvault -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/version.go index 1f7f80692b0..c459083898b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/version.go @@ -2,19 +2,8 @@ package keyvault import "github.com/Azure/azure-sdk-for-go/version" -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/CHANGELOG.md index e17ac1de138..3f351b0c13f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/CHANGELOG.md @@ -1,5 +1,5 @@ Generated from https://github.com/Azure/azure-rest-api-specs/tree/3c764635e7d442b3e74caf593029fcd440b3ef82//specification/msi/resource-manager/readme.md tag: `package-2018-11-30` -Code generator @microsoft.azure/autorest.go@2.1.175 +Code generator @microsoft.azure/autorest.go@2.1.178 diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/_meta.json new file mode 100644 index 00000000000..b28323c8a59 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/_meta.json @@ -0,0 +1,10 @@ +{ + "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", + "readme": "/_/azure-rest-api-specs/specification/msi/resource-manager/readme.md", + "tag": "package-2018-11-30", + "use": "@microsoft.azure/autorest.go@2.1.178", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.178 --go --verbose --go-sdk-folder=/_/azure-sdk-for-go --tag=package-2018-11-30 --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/msi/resource-manager/readme.md", + "additional_properties": { + "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION" + } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/client.go index 172b8fa3eb8..0275b1df110 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/client.go @@ -3,19 +3,8 @@ // The Managed Service Identity Client. package msi -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/models.go index b1486f85bc5..b8e04142293 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/models.go @@ -1,18 +1,7 @@ package msi -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/operations.go index afda77cbb6b..2c053ff1ed7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/operations.go @@ -1,18 +1,7 @@ package msi -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/systemassignedidentities.go b/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/systemassignedidentities.go index 10515ecd593..7be8bb0fd96 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/systemassignedidentities.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/systemassignedidentities.go @@ -1,18 +1,7 @@ package msi -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/userassignedidentities.go b/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/userassignedidentities.go index ec8a15fc951..3fe1b31d630 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/userassignedidentities.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/userassignedidentities.go @@ -1,18 +1,7 @@ package msi -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/version.go index c324c23e68f..e7655eccb2c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi/version.go @@ -2,19 +2,8 @@ package msi import "github.com/Azure/azure-sdk-for-go/version" -// Copyright (c) Microsoft and contributors. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/CHANGELOG.md index 3e61c07c02b..1f6b10ec470 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/CHANGELOG.md @@ -1,5 +1,167 @@ Generated from https://github.com/Azure/azure-rest-api-specs/tree/3c764635e7d442b3e74caf593029fcd440b3ef82//specification/network/resource-manager/readme.md tag: `package-2018-12` -Code generator @microsoft.azure/autorest.go@2.1.175 +Code generator @microsoft.azure/autorest.go@2.1.178 +### New Funcs + +1. *ApplicationGatewaysBackendHealthFuture.UnmarshalJSON([]byte) error +1. *ApplicationGatewaysCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ApplicationGatewaysDeleteFuture.UnmarshalJSON([]byte) error +1. *ApplicationGatewaysStartFuture.UnmarshalJSON([]byte) error +1. *ApplicationGatewaysStopFuture.UnmarshalJSON([]byte) error +1. *ApplicationGatewaysUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *ApplicationSecurityGroupsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ApplicationSecurityGroupsDeleteFuture.UnmarshalJSON([]byte) error +1. *ApplicationSecurityGroupsUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *AzureFirewallsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *AzureFirewallsDeleteFuture.UnmarshalJSON([]byte) error +1. *ConnectionMonitorsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ConnectionMonitorsDeleteFuture.UnmarshalJSON([]byte) error +1. *ConnectionMonitorsQueryFuture.UnmarshalJSON([]byte) error +1. *ConnectionMonitorsStartFuture.UnmarshalJSON([]byte) error +1. *ConnectionMonitorsStopFuture.UnmarshalJSON([]byte) error +1. *DdosCustomPoliciesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *DdosCustomPoliciesDeleteFuture.UnmarshalJSON([]byte) error +1. *DdosCustomPoliciesUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *DdosProtectionPlansCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *DdosProtectionPlansDeleteFuture.UnmarshalJSON([]byte) error +1. *DdosProtectionPlansUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCircuitAuthorizationsDeleteFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCircuitConnectionsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCircuitConnectionsDeleteFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCircuitPeeringsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. 
*ExpressRouteCircuitPeeringsDeleteFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCircuitsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCircuitsDeleteFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCircuitsListArpTableFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCircuitsListRoutesTableFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCircuitsListRoutesTableSummaryFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCircuitsUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteConnectionsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteConnectionsDeleteFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCrossConnectionPeeringsDeleteFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCrossConnectionsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCrossConnectionsListArpTableFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCrossConnectionsListRoutesTableFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCrossConnectionsListRoutesTableSummaryFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCrossConnectionsUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteGatewaysCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteGatewaysDeleteFuture.UnmarshalJSON([]byte) error +1. *ExpressRoutePortsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ExpressRoutePortsDeleteFuture.UnmarshalJSON([]byte) error +1. *ExpressRoutePortsUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *InboundNatRulesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *InboundNatRulesDeleteFuture.UnmarshalJSON([]byte) error +1. *InterfaceEndpointsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *InterfaceEndpointsDeleteFuture.UnmarshalJSON([]byte) error +1. *InterfaceTapConfigurationsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *InterfaceTapConfigurationsDeleteFuture.UnmarshalJSON([]byte) error +1. *InterfacesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *InterfacesDeleteFuture.UnmarshalJSON([]byte) error +1. *InterfacesGetEffectiveRouteTableFuture.UnmarshalJSON([]byte) error +1. *InterfacesListEffectiveNetworkSecurityGroupsFuture.UnmarshalJSON([]byte) error +1. *InterfacesUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *LoadBalancersCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *LoadBalancersDeleteFuture.UnmarshalJSON([]byte) error +1. *LoadBalancersUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *LocalNetworkGatewaysCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *LocalNetworkGatewaysDeleteFuture.UnmarshalJSON([]byte) error +1. *LocalNetworkGatewaysUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *P2sVpnGatewaysCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *P2sVpnGatewaysDeleteFuture.UnmarshalJSON([]byte) error +1. *P2sVpnGatewaysGenerateVpnProfileFuture.UnmarshalJSON([]byte) error +1. *P2sVpnGatewaysUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *P2sVpnServerConfigurationsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *P2sVpnServerConfigurationsDeleteFuture.UnmarshalJSON([]byte) error +1. *PacketCapturesCreateFuture.UnmarshalJSON([]byte) error +1. *PacketCapturesDeleteFuture.UnmarshalJSON([]byte) error +1. *PacketCapturesGetStatusFuture.UnmarshalJSON([]byte) error +1. *PacketCapturesStopFuture.UnmarshalJSON([]byte) error +1. *ProfilesDeleteFuture.UnmarshalJSON([]byte) error +1. *PublicIPAddressesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. 
*PublicIPAddressesDeleteFuture.UnmarshalJSON([]byte) error +1. *PublicIPAddressesUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *PublicIPPrefixesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *PublicIPPrefixesDeleteFuture.UnmarshalJSON([]byte) error +1. *PublicIPPrefixesUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *RouteFilterRulesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *RouteFilterRulesDeleteFuture.UnmarshalJSON([]byte) error +1. *RouteFilterRulesUpdateFuture.UnmarshalJSON([]byte) error +1. *RouteFiltersCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *RouteFiltersDeleteFuture.UnmarshalJSON([]byte) error +1. *RouteFiltersUpdateFuture.UnmarshalJSON([]byte) error +1. *RouteTablesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *RouteTablesDeleteFuture.UnmarshalJSON([]byte) error +1. *RouteTablesUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *RoutesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *RoutesDeleteFuture.UnmarshalJSON([]byte) error +1. *SecurityGroupsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *SecurityGroupsDeleteFuture.UnmarshalJSON([]byte) error +1. *SecurityGroupsUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *SecurityRulesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *SecurityRulesDeleteFuture.UnmarshalJSON([]byte) error +1. *ServiceEndpointPoliciesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ServiceEndpointPoliciesDeleteFuture.UnmarshalJSON([]byte) error +1. *ServiceEndpointPoliciesUpdateFuture.UnmarshalJSON([]byte) error +1. *ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ServiceEndpointPolicyDefinitionsDeleteFuture.UnmarshalJSON([]byte) error +1. *SubnetsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *SubnetsDeleteFuture.UnmarshalJSON([]byte) error +1. *SubnetsPrepareNetworkPoliciesFuture.UnmarshalJSON([]byte) error +1. *VirtualHubsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualHubsDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualHubsUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewayConnectionsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewayConnectionsDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewayConnectionsResetSharedKeyFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewayConnectionsSetSharedKeyFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewayConnectionsUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysGenerateVpnProfileFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysGeneratevpnclientpackageFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysGetAdvertisedRoutesFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysGetBgpPeerStatusFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysGetLearnedRoutesFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysGetVpnProfilePackageURLFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysResetFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysResetVpnClientSharedKeyFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysUpdateTagsFuture.UnmarshalJSON([]byte) error +1. 
*VirtualNetworkPeeringsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkPeeringsDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkTapsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkTapsDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkTapsUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworksCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworksDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworksUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *VirtualWansCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualWansDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualWansUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *VpnConnectionsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VpnConnectionsDeleteFuture.UnmarshalJSON([]byte) error +1. *VpnGatewaysCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VpnGatewaysDeleteFuture.UnmarshalJSON([]byte) error +1. *VpnGatewaysUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *VpnSitesConfigurationDownloadFuture.UnmarshalJSON([]byte) error +1. *VpnSitesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VpnSitesDeleteFuture.UnmarshalJSON([]byte) error +1. *VpnSitesUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *WatchersCheckConnectivityFuture.UnmarshalJSON([]byte) error +1. *WatchersDeleteFuture.UnmarshalJSON([]byte) error +1. *WatchersGetAzureReachabilityReportFuture.UnmarshalJSON([]byte) error +1. *WatchersGetFlowLogStatusFuture.UnmarshalJSON([]byte) error +1. *WatchersGetNetworkConfigurationDiagnosticFuture.UnmarshalJSON([]byte) error +1. *WatchersGetNextHopFuture.UnmarshalJSON([]byte) error +1. *WatchersGetTroubleshootingFuture.UnmarshalJSON([]byte) error +1. *WatchersGetTroubleshootingResultFuture.UnmarshalJSON([]byte) error +1. *WatchersGetVMSecurityRulesFuture.UnmarshalJSON([]byte) error +1. *WatchersListAvailableProvidersFuture.UnmarshalJSON([]byte) error +1. *WatchersSetFlowLogConfigurationFuture.UnmarshalJSON([]byte) error +1. *WatchersVerifyIPFlowFuture.UnmarshalJSON([]byte) error +1. 
*WebApplicationFirewallPoliciesDeleteFuture.UnmarshalJSON([]byte) error diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/_meta.json new file mode 100644 index 00000000000..71f265e54ca --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/_meta.json @@ -0,0 +1,10 @@ +{ + "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", + "readme": "/_/azure-rest-api-specs/specification/network/resource-manager/readme.md", + "tag": "package-2018-12", + "use": "@microsoft.azure/autorest.go@2.1.178", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.178 --go --verbose --go-sdk-folder=/_/azure-sdk-for-go --tag=package-2018-12 --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/network/resource-manager/readme.md", + "additional_properties": { + "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION" + } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/applicationgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/applicationgateways.go index e2e9e26b264..d1822cff5c8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/applicationgateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/applicationgateways.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -109,30 +98,7 @@ func (client ApplicationGatewaysClient) BackendHealthSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ApplicationGatewaysClient) (agbh ApplicationGatewayBackendHealth, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysBackendHealthFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysBackendHealthFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - agbh.Response.Response, err = future.GetResult(sender) - if agbh.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysBackendHealthFuture", "Result", nil, "received nil response and error") - } - if err == nil && agbh.Response.Response.StatusCode != http.StatusNoContent { - agbh, err = client.BackendHealthResponder(agbh.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysBackendHealthFuture", "Result", agbh.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -243,30 +209,7 @@ func (client ApplicationGatewaysClient) CreateOrUpdateSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ApplicationGatewaysClient) (ag ApplicationGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ag.Response.Response, err = future.GetResult(sender) - if ag.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && ag.Response.Response.StatusCode != http.StatusNoContent { - ag, err = client.CreateOrUpdateResponder(ag.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysCreateOrUpdateFuture", "Result", ag.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -344,20 +287,7 @@ func (client ApplicationGatewaysClient) DeleteSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ApplicationGatewaysClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysDeleteFuture") - return - } - ar.Response = 
future.Response() - return - } + future.Result = future.result return } @@ -1281,20 +1211,7 @@ func (client ApplicationGatewaysClient) StartSender(req *http.Request) (future A var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ApplicationGatewaysClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysStartFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysStartFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1371,20 +1288,7 @@ func (client ApplicationGatewaysClient) StopSender(req *http.Request) (future Ap var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ApplicationGatewaysClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysStopFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysStopFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1464,30 +1368,7 @@ func (client ApplicationGatewaysClient) UpdateTagsSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ApplicationGatewaysClient) (ag ApplicationGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ag.Response.Response, err = future.GetResult(sender) - if ag.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && ag.Response.Response.StatusCode != http.StatusNoContent { - ag, err = client.UpdateTagsResponder(ag.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysUpdateTagsFuture", "Result", ag.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/applicationsecuritygroups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/applicationsecuritygroups.go index de124eea770..d04d90ae890 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/applicationsecuritygroups.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/applicationsecuritygroups.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -108,30 +97,7 @@ func (client ApplicationSecurityGroupsClient) CreateOrUpdateSender(req *http.Req var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ApplicationSecurityGroupsClient) (asg ApplicationSecurityGroup, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ApplicationSecurityGroupsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - asg.Response.Response, err = future.GetResult(sender) - if asg.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && asg.Response.Response.StatusCode != http.StatusNoContent { - asg, err = client.CreateOrUpdateResponder(asg.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsCreateOrUpdateFuture", "Result", asg.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -209,20 +175,7 @@ func (client ApplicationSecurityGroupsClient) DeleteSender(req *http.Request) (f var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ApplicationSecurityGroupsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ApplicationSecurityGroupsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -607,30 +560,7 @@ func (client ApplicationSecurityGroupsClient) UpdateTagsSender(req *http.Request var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ApplicationSecurityGroupsClient) (asg ApplicationSecurityGroup, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, 
"network.ApplicationSecurityGroupsUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ApplicationSecurityGroupsUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - asg.Response.Response, err = future.GetResult(sender) - if asg.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && asg.Response.Response.StatusCode != http.StatusNoContent { - asg, err = client.UpdateTagsResponder(asg.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsUpdateTagsFuture", "Result", asg.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/availabledelegations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/availabledelegations.go index 9a63bd7a700..5dcb13f80da 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/availabledelegations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/availabledelegations.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/availableendpointservices.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/availableendpointservices.go index 1d35021c1e1..be677cbcaa8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/availableendpointservices.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/availableendpointservices.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/availableresourcegroupdelegations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/availableresourcegroupdelegations.go index a29fb399b5b..40c52ce6776 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/availableresourcegroupdelegations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/availableresourcegroupdelegations.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/azurefirewallfqdntags.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/azurefirewallfqdntags.go index b9ef24ab21a..c6a69d86dcf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/azurefirewallfqdntags.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/azurefirewallfqdntags.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/azurefirewalls.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/azurefirewalls.go index 01b0547077c..998fb02a13e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/azurefirewalls.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/azurefirewalls.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -107,30 +96,7 @@ func (client AzureFirewallsClient) CreateOrUpdateSender(req *http.Request) (futu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client AzureFirewallsClient) (af AzureFirewall, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.AzureFirewallsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.AzureFirewallsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - af.Response.Response, err = future.GetResult(sender) - if af.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.AzureFirewallsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && af.Response.Response.StatusCode != http.StatusNoContent { - af, err = client.CreateOrUpdateResponder(af.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.AzureFirewallsCreateOrUpdateFuture", "Result", af.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -208,20 +174,7 @@ func (client AzureFirewallsClient) DeleteSender(req *http.Request) (future Azure var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client AzureFirewallsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.AzureFirewallsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.AzureFirewallsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } 
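Editor's note: every hunk of this shape makes the same mechanical change. The long-running-operation plumbing that used to be emitted inline as a `future.Result` closure inside each `...Sender` function is now referenced as `future.result`, a method the v53 generator presumably emits once per future type alongside the new `UnmarshalJSON` helpers, so the polling logic can also be re-attached when a future is deserialized. The sketch below reconstructs what such a generated method plausibly looks like from the closure bodies removed above; the method name and its placement next to the future type in the package's models file are assumptions, since that file is not part of the hunks shown here, and the fragment relies on the package's existing imports (context, net/http, github.com/Azure/go-autorest/autorest and .../autorest/azure).

// Sketch only: approximate shape of the generated result method that
// future.Result now points at. Reconstructed from the closure removed in the
// AzureFirewalls CreateOrUpdateSender hunk above; the real generated source is
// not shown in this diff.
func (future *AzureFirewallsCreateOrUpdateFuture) result(client AzureFirewallsClient) (af AzureFirewall, err error) {
	var done bool
	done, err = future.DoneWithContext(context.Background(), client)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.AzureFirewallsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
		return
	}
	if !done {
		err = azure.NewAsyncOpIncompleteError("network.AzureFirewallsCreateOrUpdateFuture")
		return
	}
	sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
	af.Response.Response, err = future.GetResult(sender)
	if af.Response.Response == nil && err == nil {
		err = autorest.NewErrorWithError(err, "network.AzureFirewallsCreateOrUpdateFuture", "Result", nil, "received nil response and error")
	}
	if err == nil && af.Response.Response.StatusCode != http.StatusNoContent {
		af, err = client.CreateOrUpdateResponder(af.Response.Response)
		if err != nil {
			err = autorest.NewErrorWithError(err, "network.AzureFirewallsCreateOrUpdateFuture", "Result", af.Response.Response, "Failure responding to request")
		}
	}
	return
}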
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/bgpservicecommunities.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/bgpservicecommunities.go index 16d45a5d974..b0508aa02a7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/bgpservicecommunities.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/bgpservicecommunities.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/client.go index d163a1142cb..d723392fc57 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/client.go @@ -3,19 +3,8 @@ // Network Client package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/connectionmonitors.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/connectionmonitors.go index 462e5217ba2..5dce750e13e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/connectionmonitors.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/connectionmonitors.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -120,30 +109,7 @@ func (client ConnectionMonitorsClient) CreateOrUpdateSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ConnectionMonitorsClient) (cmr ConnectionMonitorResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ConnectionMonitorsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - cmr.Response.Response, err = future.GetResult(sender) - if cmr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && cmr.Response.Response.StatusCode != http.StatusNoContent { - cmr, err = client.CreateOrUpdateResponder(cmr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsCreateOrUpdateFuture", "Result", cmr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -223,20 +189,7 @@ func (client ConnectionMonitorsClient) DeleteSender(req *http.Request) (future C var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ConnectionMonitorsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ConnectionMonitorsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -469,30 +422,7 @@ func (client ConnectionMonitorsClient) QuerySender(req *http.Request) (future Co var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ConnectionMonitorsClient) (cmqr ConnectionMonitorQueryResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsQueryFuture", "Result", future.Response(), 
"Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ConnectionMonitorsQueryFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - cmqr.Response.Response, err = future.GetResult(sender) - if cmqr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsQueryFuture", "Result", nil, "received nil response and error") - } - if err == nil && cmqr.Response.Response.StatusCode != http.StatusNoContent { - cmqr, err = client.QueryResponder(cmqr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsQueryFuture", "Result", cmqr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -572,20 +502,7 @@ func (client ConnectionMonitorsClient) StartSender(req *http.Request) (future Co var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ConnectionMonitorsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsStartFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ConnectionMonitorsStartFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -664,20 +581,7 @@ func (client ConnectionMonitorsClient) StopSender(req *http.Request) (future Con var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ConnectionMonitorsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsStopFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ConnectionMonitorsStopFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/ddoscustompolicies.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/ddoscustompolicies.go index d1eafea64a5..161310efafa 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/ddoscustompolicies.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/ddoscustompolicies.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -108,30 +97,7 @@ func (client DdosCustomPoliciesClient) CreateOrUpdateSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DdosCustomPoliciesClient) (dcp DdosCustomPolicy, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.DdosCustomPoliciesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.DdosCustomPoliciesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - dcp.Response.Response, err = future.GetResult(sender) - if dcp.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.DdosCustomPoliciesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && dcp.Response.Response.StatusCode != http.StatusNoContent { - dcp, err = client.CreateOrUpdateResponder(dcp.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.DdosCustomPoliciesCreateOrUpdateFuture", "Result", dcp.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -209,20 +175,7 @@ func (client DdosCustomPoliciesClient) DeleteSender(req *http.Request) (future D var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DdosCustomPoliciesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.DdosCustomPoliciesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.DdosCustomPoliciesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -378,30 +331,7 @@ func (client DdosCustomPoliciesClient) UpdateTagsSender(req *http.Request) (futu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DdosCustomPoliciesClient) (dcp DdosCustomPolicy, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.DdosCustomPoliciesUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.DdosCustomPoliciesUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - dcp.Response.Response, err = future.GetResult(sender) - if dcp.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.DdosCustomPoliciesUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && dcp.Response.Response.StatusCode != http.StatusNoContent { - dcp, err = 
client.UpdateTagsResponder(dcp.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.DdosCustomPoliciesUpdateTagsFuture", "Result", dcp.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/ddosprotectionplans.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/ddosprotectionplans.go index 658f90eedb3..943e6aeec57 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/ddosprotectionplans.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/ddosprotectionplans.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -111,30 +100,7 @@ func (client DdosProtectionPlansClient) CreateOrUpdateSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DdosProtectionPlansClient) (dpp DdosProtectionPlan, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.DdosProtectionPlansCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - dpp.Response.Response, err = future.GetResult(sender) - if dpp.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && dpp.Response.Response.StatusCode != http.StatusNoContent { - dpp, err = client.CreateOrUpdateResponder(dpp.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansCreateOrUpdateFuture", "Result", dpp.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -212,20 +178,7 @@ func (client DdosProtectionPlansClient) DeleteSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DdosProtectionPlansClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if 
err != nil { - err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.DdosProtectionPlansDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -610,30 +563,7 @@ func (client DdosProtectionPlansClient) UpdateTagsSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DdosProtectionPlansClient) (dpp DdosProtectionPlan, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.DdosProtectionPlansUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - dpp.Response.Response, err = future.GetResult(sender) - if dpp.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && dpp.Response.Response.StatusCode != http.StatusNoContent { - dpp, err = client.UpdateTagsResponder(dpp.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansUpdateTagsFuture", "Result", dpp.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/defaultsecurityrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/defaultsecurityrules.go index ff2b913e99b..6f9bd5bf216 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/defaultsecurityrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/defaultsecurityrules.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
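Editor's note: from a consumer's point of view this refactor should be transparent, since `future.Result` keeps the same signature and only its body moves into a named method. The following caller is hypothetical and not part of the diff (the function name `createFirewall` and its parameters are invented for illustration); it shows the usual track-1 SDK pattern that continues to compile unchanged under v53.

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network"
)

// createFirewall starts the async operation, waits for it to finish, then
// reads the final resource through the future's Result hook, which now
// delegates to the generated result method sketched earlier.
func createFirewall(ctx context.Context, client network.AzureFirewallsClient, resourceGroup, name string, params network.AzureFirewall) (network.AzureFirewall, error) {
	future, err := client.CreateOrUpdate(ctx, resourceGroup, name, params)
	if err != nil {
		return network.AzureFirewall{}, err
	}
	// WaitForCompletionRef polls the service until the operation completes.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return network.AzureFirewall{}, err
	}
	return future.Result(client)
}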
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/enums.go index 879adc554fb..9a6ff365176 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/enums.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/enums.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutecircuitauthorizations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutecircuitauthorizations.go index 56870bc676c..78465952f17 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutecircuitauthorizations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutecircuitauthorizations.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -112,30 +101,7 @@ func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdateSender(req * var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCircuitAuthorizationsClient) (erca ExpressRouteCircuitAuthorization, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - erca.Response.Response, err = future.GetResult(sender) - if erca.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && erca.Response.Response.StatusCode != http.StatusNoContent { - erca, err = client.CreateOrUpdateResponder(erca.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture", "Result", erca.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -215,20 +181,7 @@ func (client ExpressRouteCircuitAuthorizationsClient) DeleteSender(req *http.Req var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCircuitAuthorizationsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitAuthorizationsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutecircuitconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutecircuitconnections.go index b3c98588766..25e6aa305e4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutecircuitconnections.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutecircuitconnections.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -113,30 +102,7 @@ func (client ExpressRouteCircuitConnectionsClient) CreateOrUpdateSender(req *htt var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCircuitConnectionsClient) (ercc ExpressRouteCircuitConnection, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitConnectionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitConnectionsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ercc.Response.Response, err = future.GetResult(sender) - if ercc.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitConnectionsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && ercc.Response.Response.StatusCode != http.StatusNoContent { - ercc, err = client.CreateOrUpdateResponder(ercc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitConnectionsCreateOrUpdateFuture", "Result", ercc.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -218,20 +184,7 @@ func (client ExpressRouteCircuitConnectionsClient) DeleteSender(req *http.Reques var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCircuitConnectionsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitConnectionsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitConnectionsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutecircuitpeerings.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutecircuitpeerings.go index a3cd1c7efd1..b556736df17 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutecircuitpeerings.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutecircuitpeerings.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -122,30 +111,7 @@ func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdateSender(req *http.R var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCircuitPeeringsClient) (ercp ExpressRouteCircuitPeering, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitPeeringsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ercp.Response.Response, err = future.GetResult(sender) - if ercp.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && ercp.Response.Response.StatusCode != http.StatusNoContent { - ercp, err = client.CreateOrUpdateResponder(ercp.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsCreateOrUpdateFuture", "Result", ercp.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -225,20 +191,7 @@ func (client ExpressRouteCircuitPeeringsClient) DeleteSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCircuitPeeringsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitPeeringsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutecircuits.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutecircuits.go index c51f8bff19d..d4e580a5e09 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutecircuits.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutecircuits.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. 
All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -108,30 +97,7 @@ func (client ExpressRouteCircuitsClient) CreateOrUpdateSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCircuitsClient) (erc ExpressRouteCircuit, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - erc.Response.Response, err = future.GetResult(sender) - if erc.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && erc.Response.Response.StatusCode != http.StatusNoContent { - erc, err = client.CreateOrUpdateResponder(erc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsCreateOrUpdateFuture", "Result", erc.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -209,20 +175,7 @@ func (client ExpressRouteCircuitsClient) DeleteSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCircuitsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -762,30 +715,7 @@ func (client ExpressRouteCircuitsClient) ListArpTableSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCircuitsClient) (ercatlr ExpressRouteCircuitsArpTableListResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, 
"network.ExpressRouteCircuitsListArpTableFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsListArpTableFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ercatlr.Response.Response, err = future.GetResult(sender) - if ercatlr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListArpTableFuture", "Result", nil, "received nil response and error") - } - if err == nil && ercatlr.Response.Response.StatusCode != http.StatusNoContent { - ercatlr, err = client.ListArpTableResponder(ercatlr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListArpTableFuture", "Result", ercatlr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -868,30 +798,7 @@ func (client ExpressRouteCircuitsClient) ListRoutesTableSender(req *http.Request var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCircuitsClient) (ercrtlr ExpressRouteCircuitsRoutesTableListResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListRoutesTableFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsListRoutesTableFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ercrtlr.Response.Response, err = future.GetResult(sender) - if ercrtlr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListRoutesTableFuture", "Result", nil, "received nil response and error") - } - if err == nil && ercrtlr.Response.Response.StatusCode != http.StatusNoContent { - ercrtlr, err = client.ListRoutesTableResponder(ercrtlr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListRoutesTableFuture", "Result", ercrtlr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -974,30 +881,7 @@ func (client ExpressRouteCircuitsClient) ListRoutesTableSummarySender(req *http. 
var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCircuitsClient) (ercrtslr ExpressRouteCircuitsRoutesTableSummaryListResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListRoutesTableSummaryFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsListRoutesTableSummaryFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ercrtslr.Response.Response, err = future.GetResult(sender) - if ercrtslr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListRoutesTableSummaryFuture", "Result", nil, "received nil response and error") - } - if err == nil && ercrtslr.Response.Response.StatusCode != http.StatusNoContent { - ercrtslr, err = client.ListRoutesTableSummaryResponder(ercrtslr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListRoutesTableSummaryFuture", "Result", ercrtslr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1078,30 +962,7 @@ func (client ExpressRouteCircuitsClient) UpdateTagsSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCircuitsClient) (erc ExpressRouteCircuit, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - erc.Response.Response, err = future.GetResult(sender) - if erc.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && erc.Response.Response.StatusCode != http.StatusNoContent { - erc, err = client.UpdateTagsResponder(erc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsUpdateTagsFuture", "Result", erc.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressrouteconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressrouteconnections.go index 21eaa7a0f62..9c259f09137 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressrouteconnections.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressrouteconnections.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -118,30 +107,7 @@ func (client ExpressRouteConnectionsClient) CreateOrUpdateSender(req *http.Reque var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteConnectionsClient) (erc ExpressRouteConnection, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteConnectionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteConnectionsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - erc.Response.Response, err = future.GetResult(sender) - if erc.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteConnectionsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && erc.Response.Response.StatusCode != http.StatusNoContent { - erc, err = client.CreateOrUpdateResponder(erc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteConnectionsCreateOrUpdateFuture", "Result", erc.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -221,20 +187,7 @@ func (client ExpressRouteConnectionsClient) DeleteSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteConnectionsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteConnectionsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteConnectionsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutecrossconnectionpeerings.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutecrossconnectionpeerings.go index 8fd44515863..4a7c4267359 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutecrossconnectionpeerings.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutecrossconnectionpeerings.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -124,30 +113,7 @@ func (client ExpressRouteCrossConnectionPeeringsClient) CreateOrUpdateSender(req var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCrossConnectionPeeringsClient) (erccp ExpressRouteCrossConnectionPeering, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - erccp.Response.Response, err = future.GetResult(sender) - if erccp.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && erccp.Response.Response.StatusCode != http.StatusNoContent { - erccp, err = client.CreateOrUpdateResponder(erccp.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture", "Result", erccp.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -227,20 +193,7 @@ func (client ExpressRouteCrossConnectionPeeringsClient) DeleteSender(req *http.R var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCrossConnectionPeeringsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionPeeringsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionPeeringsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutecrossconnections.go 
b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutecrossconnections.go index e3d1c51ff5f..3a06fcec09e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutecrossconnections.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutecrossconnections.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -108,30 +97,7 @@ func (client ExpressRouteCrossConnectionsClient) CreateOrUpdateSender(req *http. var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCrossConnectionsClient) (ercc ExpressRouteCrossConnection, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ercc.Response.Response, err = future.GetResult(sender) - if ercc.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && ercc.Response.Response.StatusCode != http.StatusNoContent { - ercc, err = client.CreateOrUpdateResponder(ercc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsCreateOrUpdateFuture", "Result", ercc.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -403,30 +369,7 @@ func (client ExpressRouteCrossConnectionsClient) ListArpTableSender(req *http.Re var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCrossConnectionsClient) (ercatlr ExpressRouteCircuitsArpTableListResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListArpTableFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = 
azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsListArpTableFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ercatlr.Response.Response, err = future.GetResult(sender) - if ercatlr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListArpTableFuture", "Result", nil, "received nil response and error") - } - if err == nil && ercatlr.Response.Response.StatusCode != http.StatusNoContent { - ercatlr, err = client.ListArpTableResponder(ercatlr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListArpTableFuture", "Result", ercatlr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -625,30 +568,7 @@ func (client ExpressRouteCrossConnectionsClient) ListRoutesTableSender(req *http var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCrossConnectionsClient) (ercrtlr ExpressRouteCircuitsRoutesTableListResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListRoutesTableFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsListRoutesTableFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ercrtlr.Response.Response, err = future.GetResult(sender) - if ercrtlr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListRoutesTableFuture", "Result", nil, "received nil response and error") - } - if err == nil && ercrtlr.Response.Response.StatusCode != http.StatusNoContent { - ercrtlr, err = client.ListRoutesTableResponder(ercrtlr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListRoutesTableFuture", "Result", ercrtlr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -731,30 +651,7 @@ func (client ExpressRouteCrossConnectionsClient) ListRoutesTableSummarySender(re var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCrossConnectionsClient) (erccrtslr ExpressRouteCrossConnectionsRoutesTableSummaryListResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListRoutesTableSummaryFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsListRoutesTableSummaryFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - erccrtslr.Response.Response, err = future.GetResult(sender) - if erccrtslr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, 
"network.ExpressRouteCrossConnectionsListRoutesTableSummaryFuture", "Result", nil, "received nil response and error") - } - if err == nil && erccrtslr.Response.Response.StatusCode != http.StatusNoContent { - erccrtslr, err = client.ListRoutesTableSummaryResponder(erccrtslr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListRoutesTableSummaryFuture", "Result", erccrtslr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -835,30 +732,7 @@ func (client ExpressRouteCrossConnectionsClient) UpdateTagsSender(req *http.Requ var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCrossConnectionsClient) (ercc ExpressRouteCrossConnection, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ercc.Response.Response, err = future.GetResult(sender) - if ercc.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && ercc.Response.Response.StatusCode != http.StatusNoContent { - ercc, err = client.UpdateTagsResponder(ercc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsUpdateTagsFuture", "Result", ercc.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutegateways.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutegateways.go index b9ee353ead5..d6fff636ffe 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutegateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutegateways.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -116,30 +105,7 @@ func (client ExpressRouteGatewaysClient) CreateOrUpdateSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteGatewaysClient) (erg ExpressRouteGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteGatewaysCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - erg.Response.Response, err = future.GetResult(sender) - if erg.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && erg.Response.Response.StatusCode != http.StatusNoContent { - erg, err = client.CreateOrUpdateResponder(erg.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysCreateOrUpdateFuture", "Result", erg.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -218,20 +184,7 @@ func (client ExpressRouteGatewaysClient) DeleteSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteGatewaysClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteGatewaysDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutelinks.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutelinks.go index 44c9d266dbc..a684b5e2208 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutelinks.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressroutelinks.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. 
// Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressrouteports.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressrouteports.go index 01bb6fea09c..b4dd5402247 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressrouteports.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressrouteports.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -108,30 +97,7 @@ func (client ExpressRoutePortsClient) CreateOrUpdateSender(req *http.Request) (f var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRoutePortsClient) (erp ExpressRoutePort, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRoutePortsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - erp.Response.Response, err = future.GetResult(sender) - if erp.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && erp.Response.Response.StatusCode != http.StatusNoContent { - erp, err = client.CreateOrUpdateResponder(erp.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsCreateOrUpdateFuture", "Result", erp.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -209,20 +175,7 @@ func (client ExpressRoutePortsClient) DeleteSender(req *http.Request) (future Ex var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRoutePortsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = 
azure.NewAsyncOpIncompleteError("network.ExpressRoutePortsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -607,30 +560,7 @@ func (client ExpressRoutePortsClient) UpdateTagsSender(req *http.Request) (futur var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRoutePortsClient) (erp ExpressRoutePort, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRoutePortsUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - erp.Response.Response, err = future.GetResult(sender) - if erp.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && erp.Response.Response.StatusCode != http.StatusNoContent { - erp, err = client.UpdateTagsResponder(erp.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsUpdateTagsFuture", "Result", erp.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressrouteportslocations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressrouteportslocations.go index e9dfe57d73c..1745c303140 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressrouteportslocations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressrouteportslocations.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressrouteserviceproviders.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressrouteserviceproviders.go index 155dd92a3b2..ae6864f8e30 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressrouteserviceproviders.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/expressrouteserviceproviders.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/hubvirtualnetworkconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/hubvirtualnetworkconnections.go index 75e28d3b019..0370768a9db 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/hubvirtualnetworkconnections.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/hubvirtualnetworkconnections.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/inboundnatrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/inboundnatrules.go index 40996ad5152..9e9c15496a9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/inboundnatrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/inboundnatrules.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. 
All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -128,30 +117,7 @@ func (client InboundNatRulesClient) CreateOrUpdateSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client InboundNatRulesClient) (inr InboundNatRule, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InboundNatRulesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.InboundNatRulesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - inr.Response.Response, err = future.GetResult(sender) - if inr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.InboundNatRulesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && inr.Response.Response.StatusCode != http.StatusNoContent { - inr, err = client.CreateOrUpdateResponder(inr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InboundNatRulesCreateOrUpdateFuture", "Result", inr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -231,20 +197,7 @@ func (client InboundNatRulesClient) DeleteSender(req *http.Request) (future Inbo var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client InboundNatRulesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InboundNatRulesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.InboundNatRulesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/interfaceendpoints.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/interfaceendpoints.go index 2901ee500b4..397a3d948cb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/interfaceendpoints.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/interfaceendpoints.go @@ -1,18 +1,7 @@ package network -// 
Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -107,30 +96,7 @@ func (client InterfaceEndpointsClient) CreateOrUpdateSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client InterfaceEndpointsClient) (ie InterfaceEndpoint, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfaceEndpointsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.InterfaceEndpointsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ie.Response.Response, err = future.GetResult(sender) - if ie.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.InterfaceEndpointsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && ie.Response.Response.StatusCode != http.StatusNoContent { - ie, err = client.CreateOrUpdateResponder(ie.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfaceEndpointsCreateOrUpdateFuture", "Result", ie.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -208,20 +174,7 @@ func (client InterfaceEndpointsClient) DeleteSender(req *http.Request) (future I var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client InterfaceEndpointsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfaceEndpointsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.InterfaceEndpointsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/interfaceipconfigurations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/interfaceipconfigurations.go index 735f430ba76..f9688b16583 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/interfaceipconfigurations.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/interfaceipconfigurations.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/interfaceloadbalancers.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/interfaceloadbalancers.go index f6c3fe5688e..fe16208a8d9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/interfaceloadbalancers.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/interfaceloadbalancers.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/interfacesgroup.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/interfacesgroup.go index 2a496bfc382..e8fc292d4bf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/interfacesgroup.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/interfacesgroup.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -106,30 +95,7 @@ func (client InterfacesClient) CreateOrUpdateSender(req *http.Request) (future I var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client InterfacesClient) (i Interface, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfacesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.InterfacesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - i.Response.Response, err = future.GetResult(sender) - if i.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.InterfacesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && i.Response.Response.StatusCode != http.StatusNoContent { - i, err = client.CreateOrUpdateResponder(i.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfacesCreateOrUpdateFuture", "Result", i.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -207,20 +173,7 @@ func (client InterfacesClient) DeleteSender(req *http.Request) (future Interface var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client InterfacesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfacesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.InterfacesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -377,30 +330,7 @@ func (client InterfacesClient) GetEffectiveRouteTableSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client InterfacesClient) (erlr EffectiveRouteListResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfacesGetEffectiveRouteTableFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.InterfacesGetEffectiveRouteTableFuture") - return - } - sender := autorest.DecorateSender(client, 
autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - erlr.Response.Response, err = future.GetResult(sender) - if erlr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.InterfacesGetEffectiveRouteTableFuture", "Result", nil, "received nil response and error") - } - if err == nil && erlr.Response.Response.StatusCode != http.StatusNoContent { - erlr, err = client.GetEffectiveRouteTableResponder(erlr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfacesGetEffectiveRouteTableFuture", "Result", erlr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -878,30 +808,7 @@ func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsSender(req *htt var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client InterfacesClient) (ensglr EffectiveNetworkSecurityGroupListResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfacesListEffectiveNetworkSecurityGroupsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.InterfacesListEffectiveNetworkSecurityGroupsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ensglr.Response.Response, err = future.GetResult(sender) - if ensglr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.InterfacesListEffectiveNetworkSecurityGroupsFuture", "Result", nil, "received nil response and error") - } - if err == nil && ensglr.Response.Response.StatusCode != http.StatusNoContent { - ensglr, err = client.ListEffectiveNetworkSecurityGroupsResponder(ensglr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfacesListEffectiveNetworkSecurityGroupsFuture", "Result", ensglr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1348,30 +1255,7 @@ func (client InterfacesClient) UpdateTagsSender(req *http.Request) (future Inter var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client InterfacesClient) (i Interface, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfacesUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.InterfacesUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - i.Response.Response, err = future.GetResult(sender) - if i.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.InterfacesUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && i.Response.Response.StatusCode != http.StatusNoContent { - i, err = client.UpdateTagsResponder(i.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfacesUpdateTagsFuture", "Result", 
i.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/interfacetapconfigurations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/interfacetapconfigurations.go index 6967c32ff6f..2ea9c5d510b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/interfacetapconfigurations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/interfacetapconfigurations.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -146,30 +135,7 @@ func (client InterfaceTapConfigurationsClient) CreateOrUpdateSender(req *http.Re var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client InterfaceTapConfigurationsClient) (itc InterfaceTapConfiguration, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfaceTapConfigurationsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.InterfaceTapConfigurationsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - itc.Response.Response, err = future.GetResult(sender) - if itc.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.InterfaceTapConfigurationsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && itc.Response.Response.StatusCode != http.StatusNoContent { - itc, err = client.CreateOrUpdateResponder(itc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfaceTapConfigurationsCreateOrUpdateFuture", "Result", itc.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -249,20 +215,7 @@ func (client InterfaceTapConfigurationsClient) DeleteSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client InterfaceTapConfigurationsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, 
"network.InterfaceTapConfigurationsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.InterfaceTapConfigurationsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancerbackendaddresspools.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancerbackendaddresspools.go index 80501532e3c..dc77cbba818 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancerbackendaddresspools.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancerbackendaddresspools.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancerfrontendipconfigurations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancerfrontendipconfigurations.go index 3ab314c8c6f..747fe0bb2df 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancerfrontendipconfigurations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancerfrontendipconfigurations.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancerloadbalancingrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancerloadbalancingrules.go index e44348178a9..2f61bdcd603 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancerloadbalancingrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancerloadbalancingrules.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancernetworkinterfaces.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancernetworkinterfaces.go index 5d7d51d6ac3..41a87417e6a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancernetworkinterfaces.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancernetworkinterfaces.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
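The second half of the pattern, added in models.go further down, is a custom UnmarshalJSON on each future type: it decodes the body into a concrete azure.Future, stores it behind the embedded FutureAPI interface, and re-attaches the default result implementation so a future rehydrated from serialized polling state still carries a usable Result. A sketch continuing the hypothetical ExamplesDeleteFuture from above (add "encoding/json" to that file's imports):

// UnmarshalJSON mirrors the unmarshallers added in models.go below.
func (future *ExamplesDeleteFuture) UnmarshalJSON(body []byte) error {
	var azFuture azure.Future
	if err := json.Unmarshal(body, &azFuture); err != nil {
		return err
	}
	future.FutureAPI = &azFuture
	future.Result = future.result // re-attach the default implementation after decoding
	return nil
}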
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalanceroutboundrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalanceroutboundrules.go index 2b976cfcebd..fea6b6a9a20 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalanceroutboundrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalanceroutboundrules.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancerprobes.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancerprobes.go index 78b7ae0ce8e..c0115bdfa2c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancerprobes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancerprobes.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancers.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancers.go index 3d3d506e5bc..ed33b476ab9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancers.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancers.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -106,30 +95,7 @@ func (client LoadBalancersClient) CreateOrUpdateSender(req *http.Request) (futur var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client LoadBalancersClient) (lb LoadBalancer, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.LoadBalancersCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.LoadBalancersCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - lb.Response.Response, err = future.GetResult(sender) - if lb.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.LoadBalancersCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && lb.Response.Response.StatusCode != http.StatusNoContent { - lb, err = client.CreateOrUpdateResponder(lb.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.LoadBalancersCreateOrUpdateFuture", "Result", lb.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -207,20 +173,7 @@ func (client LoadBalancersClient) DeleteSender(req *http.Request) (future LoadBa var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client LoadBalancersClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.LoadBalancersDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.LoadBalancersDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -609,30 +562,7 @@ func (client LoadBalancersClient) UpdateTagsSender(req *http.Request) (future Lo var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client LoadBalancersClient) (lb LoadBalancer, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.LoadBalancersUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = 
azure.NewAsyncOpIncompleteError("network.LoadBalancersUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - lb.Response.Response, err = future.GetResult(sender) - if lb.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.LoadBalancersUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && lb.Response.Response.StatusCode != http.StatusNoContent { - lb, err = client.UpdateTagsResponder(lb.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.LoadBalancersUpdateTagsFuture", "Result", lb.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/localnetworkgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/localnetworkgateways.go index 5c1aa025049..f58a208ca3c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/localnetworkgateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/localnetworkgateways.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -116,30 +105,7 @@ func (client LocalNetworkGatewaysClient) CreateOrUpdateSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client LocalNetworkGatewaysClient) (lng LocalNetworkGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.LocalNetworkGatewaysCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - lng.Response.Response, err = future.GetResult(sender) - if lng.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && lng.Response.Response.StatusCode != http.StatusNoContent { - lng, err = client.CreateOrUpdateResponder(lng.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysCreateOrUpdateFuture", "Result", lng.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -223,20 +189,7 @@ func (client LocalNetworkGatewaysClient) DeleteSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client LocalNetworkGatewaysClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.LocalNetworkGatewaysDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -520,30 +473,7 @@ func (client LocalNetworkGatewaysClient) UpdateTagsSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client LocalNetworkGatewaysClient) (lng LocalNetworkGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.LocalNetworkGatewaysUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - lng.Response.Response, err = future.GetResult(sender) - if lng.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && lng.Response.Response.StatusCode != http.StatusNoContent { - lng, err = client.UpdateTagsResponder(lng.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysUpdateTagsFuture", "Result", lng.Response.Response, "Failure responding to request") - } - } 
- return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/models.go index fc2b4239461..7bb883c4fd4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/models.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -2354,6 +2343,39 @@ type ApplicationGatewaysBackendHealthFuture struct { Result func(ApplicationGatewaysClient) (ApplicationGatewayBackendHealth, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ApplicationGatewaysBackendHealthFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ApplicationGatewaysBackendHealthFuture.Result. +func (future *ApplicationGatewaysBackendHealthFuture) result(client ApplicationGatewaysClient) (agbh ApplicationGatewayBackendHealth, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysBackendHealthFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysBackendHealthFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if agbh.Response.Response, err = future.GetResult(sender); err == nil && agbh.Response.Response.StatusCode != http.StatusNoContent { + agbh, err = client.BackendHealthResponder(agbh.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysBackendHealthFuture", "Result", agbh.Response.Response, "Failure responding to request") + } + } + return +} + // ApplicationGatewaysCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ApplicationGatewaysCreateOrUpdateFuture struct { @@ -2363,6 +2385,39 @@ type ApplicationGatewaysCreateOrUpdateFuture struct { Result func(ApplicationGatewaysClient) (ApplicationGateway, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *ApplicationGatewaysCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ApplicationGatewaysCreateOrUpdateFuture.Result. +func (future *ApplicationGatewaysCreateOrUpdateFuture) result(client ApplicationGatewaysClient) (ag ApplicationGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ag.Response.Response, err = future.GetResult(sender); err == nil && ag.Response.Response.StatusCode != http.StatusNoContent { + ag, err = client.CreateOrUpdateResponder(ag.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysCreateOrUpdateFuture", "Result", ag.Response.Response, "Failure responding to request") + } + } + return +} + // ApplicationGatewaysDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ApplicationGatewaysDeleteFuture struct { @@ -2372,6 +2427,33 @@ type ApplicationGatewaysDeleteFuture struct { Result func(ApplicationGatewaysClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ApplicationGatewaysDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ApplicationGatewaysDeleteFuture.Result. +func (future *ApplicationGatewaysDeleteFuture) result(client ApplicationGatewaysClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ApplicationGatewaySku SKU of an application gateway type ApplicationGatewaySku struct { // Name - Name of an application gateway SKU. Possible values include: 'StandardSmall', 'StandardMedium', 'StandardLarge', 'WAFMedium', 'WAFLarge', 'StandardV2', 'WAFV2' @@ -2590,6 +2672,33 @@ type ApplicationGatewaysStartFuture struct { Result func(ApplicationGatewaysClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ApplicationGatewaysStartFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ApplicationGatewaysStartFuture.Result. 
+func (future *ApplicationGatewaysStartFuture) result(client ApplicationGatewaysClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysStartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysStartFuture") + return + } + ar.Response = future.Response() + return +} + // ApplicationGatewaysStopFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type ApplicationGatewaysStopFuture struct { @@ -2599,6 +2708,33 @@ type ApplicationGatewaysStopFuture struct { Result func(ApplicationGatewaysClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ApplicationGatewaysStopFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ApplicationGatewaysStopFuture.Result. +func (future *ApplicationGatewaysStopFuture) result(client ApplicationGatewaysClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysStopFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysStopFuture") + return + } + ar.Response = future.Response() + return +} + // ApplicationGatewaysUpdateTagsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ApplicationGatewaysUpdateTagsFuture struct { @@ -2608,6 +2744,39 @@ type ApplicationGatewaysUpdateTagsFuture struct { Result func(ApplicationGatewaysClient) (ApplicationGateway, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ApplicationGatewaysUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ApplicationGatewaysUpdateTagsFuture.Result. 
+func (future *ApplicationGatewaysUpdateTagsFuture) result(client ApplicationGatewaysClient) (ag ApplicationGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ag.Response.Response, err = future.GetResult(sender); err == nil && ag.Response.Response.StatusCode != http.StatusNoContent { + ag, err = client.UpdateTagsResponder(ag.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysUpdateTagsFuture", "Result", ag.Response.Response, "Failure responding to request") + } + } + return +} + // ApplicationGatewayTrustedRootCertificate trusted Root certificates of an application gateway. type ApplicationGatewayTrustedRootCertificate struct { *ApplicationGatewayTrustedRootCertificatePropertiesFormat `json:"properties,omitempty"` @@ -3150,6 +3319,39 @@ type ApplicationSecurityGroupsCreateOrUpdateFuture struct { Result func(ApplicationSecurityGroupsClient) (ApplicationSecurityGroup, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ApplicationSecurityGroupsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ApplicationSecurityGroupsCreateOrUpdateFuture.Result. +func (future *ApplicationSecurityGroupsCreateOrUpdateFuture) result(client ApplicationSecurityGroupsClient) (asg ApplicationSecurityGroup, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ApplicationSecurityGroupsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if asg.Response.Response, err = future.GetResult(sender); err == nil && asg.Response.Response.StatusCode != http.StatusNoContent { + asg, err = client.CreateOrUpdateResponder(asg.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsCreateOrUpdateFuture", "Result", asg.Response.Response, "Failure responding to request") + } + } + return +} + // ApplicationSecurityGroupsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ApplicationSecurityGroupsDeleteFuture struct { @@ -3159,6 +3361,33 @@ type ApplicationSecurityGroupsDeleteFuture struct { Result func(ApplicationSecurityGroupsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *ApplicationSecurityGroupsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ApplicationSecurityGroupsDeleteFuture.Result. +func (future *ApplicationSecurityGroupsDeleteFuture) result(client ApplicationSecurityGroupsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ApplicationSecurityGroupsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ApplicationSecurityGroupsUpdateTagsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ApplicationSecurityGroupsUpdateTagsFuture struct { @@ -3168,6 +3397,39 @@ type ApplicationSecurityGroupsUpdateTagsFuture struct { Result func(ApplicationSecurityGroupsClient) (ApplicationSecurityGroup, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ApplicationSecurityGroupsUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ApplicationSecurityGroupsUpdateTagsFuture.Result. +func (future *ApplicationSecurityGroupsUpdateTagsFuture) result(client ApplicationSecurityGroupsClient) (asg ApplicationSecurityGroup, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ApplicationSecurityGroupsUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if asg.Response.Response, err = future.GetResult(sender); err == nil && asg.Response.Response.StatusCode != http.StatusNoContent { + asg, err = client.UpdateTagsResponder(asg.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsUpdateTagsFuture", "Result", asg.Response.Response, "Failure responding to request") + } + } + return +} + // AuthorizationListResult response for ListAuthorizations API service call retrieves all authorizations // that belongs to an ExpressRouteCircuit. type AuthorizationListResult struct { @@ -4614,6 +4876,39 @@ type AzureFirewallsCreateOrUpdateFuture struct { Result func(AzureFirewallsClient) (AzureFirewall, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *AzureFirewallsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for AzureFirewallsCreateOrUpdateFuture.Result. 
+func (future *AzureFirewallsCreateOrUpdateFuture) result(client AzureFirewallsClient) (af AzureFirewall, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.AzureFirewallsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.AzureFirewallsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if af.Response.Response, err = future.GetResult(sender); err == nil && af.Response.Response.StatusCode != http.StatusNoContent { + af, err = client.CreateOrUpdateResponder(af.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.AzureFirewallsCreateOrUpdateFuture", "Result", af.Response.Response, "Failure responding to request") + } + } + return +} + // AzureFirewallsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type AzureFirewallsDeleteFuture struct { @@ -4623,6 +4918,33 @@ type AzureFirewallsDeleteFuture struct { Result func(AzureFirewallsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *AzureFirewallsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for AzureFirewallsDeleteFuture.Result. +func (future *AzureFirewallsDeleteFuture) result(client AzureFirewallsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.AzureFirewallsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.AzureFirewallsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // AzureReachabilityReport azure reachability report details. type AzureReachabilityReport struct { autorest.Response `json:"-"` @@ -5398,6 +5720,39 @@ type ConnectionMonitorsCreateOrUpdateFuture struct { Result func(ConnectionMonitorsClient) (ConnectionMonitorResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ConnectionMonitorsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ConnectionMonitorsCreateOrUpdateFuture.Result. 
+func (future *ConnectionMonitorsCreateOrUpdateFuture) result(client ConnectionMonitorsClient) (cmr ConnectionMonitorResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ConnectionMonitorsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if cmr.Response.Response, err = future.GetResult(sender); err == nil && cmr.Response.Response.StatusCode != http.StatusNoContent { + cmr, err = client.CreateOrUpdateResponder(cmr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsCreateOrUpdateFuture", "Result", cmr.Response.Response, "Failure responding to request") + } + } + return +} + // ConnectionMonitorsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ConnectionMonitorsDeleteFuture struct { @@ -5407,6 +5762,33 @@ type ConnectionMonitorsDeleteFuture struct { Result func(ConnectionMonitorsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ConnectionMonitorsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ConnectionMonitorsDeleteFuture.Result. +func (future *ConnectionMonitorsDeleteFuture) result(client ConnectionMonitorsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ConnectionMonitorsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ConnectionMonitorSource describes the source of connection monitor. type ConnectionMonitorSource struct { // ResourceID - The ID of the resource used as the source by connection monitor. @@ -5424,6 +5806,39 @@ type ConnectionMonitorsQueryFuture struct { Result func(ConnectionMonitorsClient) (ConnectionMonitorQueryResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ConnectionMonitorsQueryFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ConnectionMonitorsQueryFuture.Result. 
+func (future *ConnectionMonitorsQueryFuture) result(client ConnectionMonitorsClient) (cmqr ConnectionMonitorQueryResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsQueryFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ConnectionMonitorsQueryFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if cmqr.Response.Response, err = future.GetResult(sender); err == nil && cmqr.Response.Response.StatusCode != http.StatusNoContent { + cmqr, err = client.QueryResponder(cmqr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsQueryFuture", "Result", cmqr.Response.Response, "Failure responding to request") + } + } + return +} + // ConnectionMonitorsStartFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type ConnectionMonitorsStartFuture struct { @@ -5433,6 +5848,33 @@ type ConnectionMonitorsStartFuture struct { Result func(ConnectionMonitorsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ConnectionMonitorsStartFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ConnectionMonitorsStartFuture.Result. +func (future *ConnectionMonitorsStartFuture) result(client ConnectionMonitorsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsStartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ConnectionMonitorsStartFuture") + return + } + ar.Response = future.Response() + return +} + // ConnectionMonitorsStopFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type ConnectionMonitorsStopFuture struct { @@ -5442,6 +5884,33 @@ type ConnectionMonitorsStopFuture struct { Result func(ConnectionMonitorsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ConnectionMonitorsStopFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ConnectionMonitorsStopFuture.Result. 
+func (future *ConnectionMonitorsStopFuture) result(client ConnectionMonitorsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsStopFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ConnectionMonitorsStopFuture") + return + } + ar.Response = future.Response() + return +} + // ConnectionResetSharedKey the virtual network connection reset shared key type ConnectionResetSharedKey struct { autorest.Response `json:"-"` @@ -5923,6 +6392,39 @@ type DdosCustomPoliciesCreateOrUpdateFuture struct { Result func(DdosCustomPoliciesClient) (DdosCustomPolicy, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DdosCustomPoliciesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DdosCustomPoliciesCreateOrUpdateFuture.Result. +func (future *DdosCustomPoliciesCreateOrUpdateFuture) result(client DdosCustomPoliciesClient) (dcp DdosCustomPolicy, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.DdosCustomPoliciesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.DdosCustomPoliciesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if dcp.Response.Response, err = future.GetResult(sender); err == nil && dcp.Response.Response.StatusCode != http.StatusNoContent { + dcp, err = client.CreateOrUpdateResponder(dcp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.DdosCustomPoliciesCreateOrUpdateFuture", "Result", dcp.Response.Response, "Failure responding to request") + } + } + return +} + // DdosCustomPoliciesDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type DdosCustomPoliciesDeleteFuture struct { @@ -5932,6 +6434,33 @@ type DdosCustomPoliciesDeleteFuture struct { Result func(DdosCustomPoliciesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DdosCustomPoliciesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DdosCustomPoliciesDeleteFuture.Result. 
+func (future *DdosCustomPoliciesDeleteFuture) result(client DdosCustomPoliciesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.DdosCustomPoliciesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.DdosCustomPoliciesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // DdosCustomPoliciesUpdateTagsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type DdosCustomPoliciesUpdateTagsFuture struct { @@ -5941,6 +6470,39 @@ type DdosCustomPoliciesUpdateTagsFuture struct { Result func(DdosCustomPoliciesClient) (DdosCustomPolicy, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DdosCustomPoliciesUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DdosCustomPoliciesUpdateTagsFuture.Result. +func (future *DdosCustomPoliciesUpdateTagsFuture) result(client DdosCustomPoliciesClient) (dcp DdosCustomPolicy, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.DdosCustomPoliciesUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.DdosCustomPoliciesUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if dcp.Response.Response, err = future.GetResult(sender); err == nil && dcp.Response.Response.StatusCode != http.StatusNoContent { + dcp, err = client.UpdateTagsResponder(dcp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.DdosCustomPoliciesUpdateTagsFuture", "Result", dcp.Response.Response, "Failure responding to request") + } + } + return +} + // DdosCustomPolicy a DDoS custom policy in a resource group. type DdosCustomPolicy struct { autorest.Response `json:"-"` @@ -6376,6 +6938,39 @@ type DdosProtectionPlansCreateOrUpdateFuture struct { Result func(DdosProtectionPlansClient) (DdosProtectionPlan, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DdosProtectionPlansCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DdosProtectionPlansCreateOrUpdateFuture.Result. 
+func (future *DdosProtectionPlansCreateOrUpdateFuture) result(client DdosProtectionPlansClient) (dpp DdosProtectionPlan, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.DdosProtectionPlansCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if dpp.Response.Response, err = future.GetResult(sender); err == nil && dpp.Response.Response.StatusCode != http.StatusNoContent { + dpp, err = client.CreateOrUpdateResponder(dpp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansCreateOrUpdateFuture", "Result", dpp.Response.Response, "Failure responding to request") + } + } + return +} + // DdosProtectionPlansDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type DdosProtectionPlansDeleteFuture struct { @@ -6385,6 +6980,33 @@ type DdosProtectionPlansDeleteFuture struct { Result func(DdosProtectionPlansClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DdosProtectionPlansDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DdosProtectionPlansDeleteFuture.Result. +func (future *DdosProtectionPlansDeleteFuture) result(client DdosProtectionPlansClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.DdosProtectionPlansDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // DdosProtectionPlansUpdateTagsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type DdosProtectionPlansUpdateTagsFuture struct { @@ -6394,15 +7016,48 @@ type DdosProtectionPlansUpdateTagsFuture struct { Result func(DdosProtectionPlansClient) (DdosProtectionPlan, error) } -// DdosSettings contains the DDoS protection settings of the public IP. -type DdosSettings struct { - // DdosCustomPolicy - The DDoS custom policy associated with the public IP. - DdosCustomPolicy *SubResource `json:"ddosCustomPolicy,omitempty"` - // ProtectionCoverage - The DDoS protection policy customizability of the public IP. Only standard coverage will have the ability to be customized. Possible values include: 'ProtectionCoverageBasic', 'ProtectionCoverageStandard' - ProtectionCoverage ProtectionCoverage `json:"protectionCoverage,omitempty"` +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *DdosProtectionPlansUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil } -// Delegation details the service to which the subnet is delegated. +// result is the default implementation for DdosProtectionPlansUpdateTagsFuture.Result. +func (future *DdosProtectionPlansUpdateTagsFuture) result(client DdosProtectionPlansClient) (dpp DdosProtectionPlan, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.DdosProtectionPlansUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if dpp.Response.Response, err = future.GetResult(sender); err == nil && dpp.Response.Response.StatusCode != http.StatusNoContent { + dpp, err = client.UpdateTagsResponder(dpp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansUpdateTagsFuture", "Result", dpp.Response.Response, "Failure responding to request") + } + } + return +} + +// DdosSettings contains the DDoS protection settings of the public IP. +type DdosSettings struct { + // DdosCustomPolicy - The DDoS custom policy associated with the public IP. + DdosCustomPolicy *SubResource `json:"ddosCustomPolicy,omitempty"` + // ProtectionCoverage - The DDoS protection policy customizability of the public IP. Only standard coverage will have the ability to be customized. Possible values include: 'ProtectionCoverageBasic', 'ProtectionCoverageStandard' + ProtectionCoverage ProtectionCoverage `json:"protectionCoverage,omitempty"` +} + +// Delegation details the service to which the subnet is delegated. type Delegation struct { // ServiceDelegationPropertiesFormat - Properties of the subnet. *ServiceDelegationPropertiesFormat `json:"properties,omitempty"` @@ -7101,6 +7756,39 @@ type ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture struct { Result func(ExpressRouteCircuitAuthorizationsClient) (ExpressRouteCircuitAuthorization, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture.Result. 
+func (future *ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture) result(client ExpressRouteCircuitAuthorizationsClient) (erca ExpressRouteCircuitAuthorization, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if erca.Response.Response, err = future.GetResult(sender); err == nil && erca.Response.Response.StatusCode != http.StatusNoContent { + erca, err = client.CreateOrUpdateResponder(erca.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture", "Result", erca.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteCircuitAuthorizationsDeleteFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type ExpressRouteCircuitAuthorizationsDeleteFuture struct { @@ -7110,6 +7798,33 @@ type ExpressRouteCircuitAuthorizationsDeleteFuture struct { Result func(ExpressRouteCircuitAuthorizationsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCircuitAuthorizationsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCircuitAuthorizationsDeleteFuture.Result. +func (future *ExpressRouteCircuitAuthorizationsDeleteFuture) result(client ExpressRouteCircuitAuthorizationsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitAuthorizationsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ExpressRouteCircuitConnection express Route Circuit Connection in an ExpressRouteCircuitPeering // resource. type ExpressRouteCircuitConnection struct { @@ -7393,6 +8108,39 @@ type ExpressRouteCircuitConnectionsCreateOrUpdateFuture struct { Result func(ExpressRouteCircuitConnectionsClient) (ExpressRouteCircuitConnection, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCircuitConnectionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCircuitConnectionsCreateOrUpdateFuture.Result. 
+func (future *ExpressRouteCircuitConnectionsCreateOrUpdateFuture) result(client ExpressRouteCircuitConnectionsClient) (ercc ExpressRouteCircuitConnection, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitConnectionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitConnectionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ercc.Response.Response, err = future.GetResult(sender); err == nil && ercc.Response.Response.StatusCode != http.StatusNoContent { + ercc, err = client.CreateOrUpdateResponder(ercc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitConnectionsCreateOrUpdateFuture", "Result", ercc.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteCircuitConnectionsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ExpressRouteCircuitConnectionsDeleteFuture struct { @@ -7402,6 +8150,33 @@ type ExpressRouteCircuitConnectionsDeleteFuture struct { Result func(ExpressRouteCircuitConnectionsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCircuitConnectionsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCircuitConnectionsDeleteFuture.Result. +func (future *ExpressRouteCircuitConnectionsDeleteFuture) result(client ExpressRouteCircuitConnectionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitConnectionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitConnectionsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ExpressRouteCircuitListResult response for ListExpressRouteCircuit API service call. type ExpressRouteCircuitListResult struct { autorest.Response `json:"-"` @@ -7939,6 +8714,39 @@ type ExpressRouteCircuitPeeringsCreateOrUpdateFuture struct { Result func(ExpressRouteCircuitPeeringsClient) (ExpressRouteCircuitPeering, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCircuitPeeringsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCircuitPeeringsCreateOrUpdateFuture.Result. 
+func (future *ExpressRouteCircuitPeeringsCreateOrUpdateFuture) result(client ExpressRouteCircuitPeeringsClient) (ercp ExpressRouteCircuitPeering, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitPeeringsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ercp.Response.Response, err = future.GetResult(sender); err == nil && ercp.Response.Response.StatusCode != http.StatusNoContent { + ercp, err = client.CreateOrUpdateResponder(ercp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsCreateOrUpdateFuture", "Result", ercp.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteCircuitPeeringsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ExpressRouteCircuitPeeringsDeleteFuture struct { @@ -7948,6 +8756,33 @@ type ExpressRouteCircuitPeeringsDeleteFuture struct { Result func(ExpressRouteCircuitPeeringsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCircuitPeeringsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCircuitPeeringsDeleteFuture.Result. +func (future *ExpressRouteCircuitPeeringsDeleteFuture) result(client ExpressRouteCircuitPeeringsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitPeeringsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ExpressRouteCircuitPropertiesFormat properties of ExpressRouteCircuit. type ExpressRouteCircuitPropertiesFormat struct { // AllowClassicOperations - Allow classic operations @@ -8083,6 +8918,39 @@ type ExpressRouteCircuitsCreateOrUpdateFuture struct { Result func(ExpressRouteCircuitsClient) (ExpressRouteCircuit, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCircuitsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCircuitsCreateOrUpdateFuture.Result. 
+func (future *ExpressRouteCircuitsCreateOrUpdateFuture) result(client ExpressRouteCircuitsClient) (erc ExpressRouteCircuit, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if erc.Response.Response, err = future.GetResult(sender); err == nil && erc.Response.Response.StatusCode != http.StatusNoContent { + erc, err = client.CreateOrUpdateResponder(erc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsCreateOrUpdateFuture", "Result", erc.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteCircuitsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ExpressRouteCircuitsDeleteFuture struct { @@ -8092,6 +8960,33 @@ type ExpressRouteCircuitsDeleteFuture struct { Result func(ExpressRouteCircuitsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCircuitsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCircuitsDeleteFuture.Result. +func (future *ExpressRouteCircuitsDeleteFuture) result(client ExpressRouteCircuitsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ExpressRouteCircuitServiceProviderProperties contains ServiceProviderProperties in an // ExpressRouteCircuit. type ExpressRouteCircuitServiceProviderProperties struct { @@ -8122,6 +9017,39 @@ type ExpressRouteCircuitsListArpTableFuture struct { Result func(ExpressRouteCircuitsClient) (ExpressRouteCircuitsArpTableListResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCircuitsListArpTableFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCircuitsListArpTableFuture.Result. 
+func (future *ExpressRouteCircuitsListArpTableFuture) result(client ExpressRouteCircuitsClient) (ercatlr ExpressRouteCircuitsArpTableListResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListArpTableFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsListArpTableFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ercatlr.Response.Response, err = future.GetResult(sender); err == nil && ercatlr.Response.Response.StatusCode != http.StatusNoContent { + ercatlr, err = client.ListArpTableResponder(ercatlr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListArpTableFuture", "Result", ercatlr.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteCircuitsListRoutesTableFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ExpressRouteCircuitsListRoutesTableFuture struct { @@ -8131,6 +9059,39 @@ type ExpressRouteCircuitsListRoutesTableFuture struct { Result func(ExpressRouteCircuitsClient) (ExpressRouteCircuitsRoutesTableListResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCircuitsListRoutesTableFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCircuitsListRoutesTableFuture.Result. +func (future *ExpressRouteCircuitsListRoutesTableFuture) result(client ExpressRouteCircuitsClient) (ercrtlr ExpressRouteCircuitsRoutesTableListResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListRoutesTableFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsListRoutesTableFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ercrtlr.Response.Response, err = future.GetResult(sender); err == nil && ercrtlr.Response.Response.StatusCode != http.StatusNoContent { + ercrtlr, err = client.ListRoutesTableResponder(ercrtlr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListRoutesTableFuture", "Result", ercrtlr.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteCircuitsListRoutesTableSummaryFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type ExpressRouteCircuitsListRoutesTableSummaryFuture struct { @@ -8140,6 +9101,39 @@ type ExpressRouteCircuitsListRoutesTableSummaryFuture struct { Result func(ExpressRouteCircuitsClient) (ExpressRouteCircuitsRoutesTableSummaryListResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *ExpressRouteCircuitsListRoutesTableSummaryFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCircuitsListRoutesTableSummaryFuture.Result. +func (future *ExpressRouteCircuitsListRoutesTableSummaryFuture) result(client ExpressRouteCircuitsClient) (ercrtslr ExpressRouteCircuitsRoutesTableSummaryListResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListRoutesTableSummaryFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsListRoutesTableSummaryFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ercrtslr.Response.Response, err = future.GetResult(sender); err == nil && ercrtslr.Response.Response.StatusCode != http.StatusNoContent { + ercrtslr, err = client.ListRoutesTableSummaryResponder(ercrtslr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListRoutesTableSummaryFuture", "Result", ercrtslr.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteCircuitsRoutesTableListResult response for ListRoutesTable associated with the Express Route // Circuits API. type ExpressRouteCircuitsRoutesTableListResult struct { @@ -8182,6 +9176,39 @@ type ExpressRouteCircuitsUpdateTagsFuture struct { Result func(ExpressRouteCircuitsClient) (ExpressRouteCircuit, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCircuitsUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCircuitsUpdateTagsFuture.Result. +func (future *ExpressRouteCircuitsUpdateTagsFuture) result(client ExpressRouteCircuitsClient) (erc ExpressRouteCircuit, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if erc.Response.Response, err = future.GetResult(sender); err == nil && erc.Response.Response.StatusCode != http.StatusNoContent { + erc, err = client.UpdateTagsResponder(erc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsUpdateTagsFuture", "Result", erc.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteConnection expressRouteConnection resource. 
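For orientation while reading these generated hunks: every *Future type in this file gains the same pair of methods. UnmarshalJSON rehydrates the saved polling state into an azure.Future, stores it in the FutureAPI field and points the exported Result field at the unexported result method; the doc comments all read "custom unmarshaller for CreateFuture" regardless of the concrete type, which is wording produced by the SDK generator and reproduced here verbatim. The sketch below is not part of the diff; the API version in the import path, the stored payload and the client wiring are assumptions made only to show how a persisted future could be resumed.

    package sketch

    import (
        "encoding/json"

        // Illustrative API version; substitute whichever network package this repo vendors.
        "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network"
    )

    func resumeCircuitCreate(client network.ExpressRouteCircuitsClient, stored []byte) (network.ExpressRouteCircuit, error) {
        var future network.ExpressRouteCircuitsCreateOrUpdateFuture
        // The custom UnmarshalJSON added in this diff rebuilds future.FutureAPI from
        // the saved polling state and sets future.Result to the generated result method.
        if err := json.Unmarshal(stored, &future); err != nil {
            return network.ExpressRouteCircuit{}, err
        }
        // Result checks the operation once; if it is still running it returns an
        // async-op-incomplete error, otherwise it re-fetches the resource and
        // decodes it through CreateOrUpdateResponder.
        return future.Result(client)
    }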
type ExpressRouteConnection struct { autorest.Response `json:"-"` @@ -8298,6 +9325,39 @@ type ExpressRouteConnectionsCreateOrUpdateFuture struct { Result func(ExpressRouteConnectionsClient) (ExpressRouteConnection, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteConnectionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteConnectionsCreateOrUpdateFuture.Result. +func (future *ExpressRouteConnectionsCreateOrUpdateFuture) result(client ExpressRouteConnectionsClient) (erc ExpressRouteConnection, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteConnectionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteConnectionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if erc.Response.Response, err = future.GetResult(sender); err == nil && erc.Response.Response.StatusCode != http.StatusNoContent { + erc, err = client.CreateOrUpdateResponder(erc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteConnectionsCreateOrUpdateFuture", "Result", erc.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteConnectionsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ExpressRouteConnectionsDeleteFuture struct { @@ -8307,6 +9367,33 @@ type ExpressRouteConnectionsDeleteFuture struct { Result func(ExpressRouteConnectionsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteConnectionsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteConnectionsDeleteFuture.Result. +func (future *ExpressRouteConnectionsDeleteFuture) result(client ExpressRouteConnectionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteConnectionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteConnectionsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ExpressRouteCrossConnection expressRouteCrossConnection resource type ExpressRouteCrossConnection struct { autorest.Response `json:"-"` @@ -8920,6 +10007,39 @@ type ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture struct { Result func(ExpressRouteCrossConnectionPeeringsClient) (ExpressRouteCrossConnectionPeering, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture.Result. +func (future *ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture) result(client ExpressRouteCrossConnectionPeeringsClient) (erccp ExpressRouteCrossConnectionPeering, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if erccp.Response.Response, err = future.GetResult(sender); err == nil && erccp.Response.Response.StatusCode != http.StatusNoContent { + erccp, err = client.CreateOrUpdateResponder(erccp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture", "Result", erccp.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteCrossConnectionPeeringsDeleteFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type ExpressRouteCrossConnectionPeeringsDeleteFuture struct { @@ -8929,6 +10049,33 @@ type ExpressRouteCrossConnectionPeeringsDeleteFuture struct { Result func(ExpressRouteCrossConnectionPeeringsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCrossConnectionPeeringsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCrossConnectionPeeringsDeleteFuture.Result. +func (future *ExpressRouteCrossConnectionPeeringsDeleteFuture) result(client ExpressRouteCrossConnectionPeeringsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionPeeringsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionPeeringsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ExpressRouteCrossConnectionProperties properties of ExpressRouteCrossConnection. type ExpressRouteCrossConnectionProperties struct { // PrimaryAzurePort - READ-ONLY; The name of the primary port. @@ -8998,6 +10145,39 @@ type ExpressRouteCrossConnectionsCreateOrUpdateFuture struct { Result func(ExpressRouteCrossConnectionsClient) (ExpressRouteCrossConnection, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *ExpressRouteCrossConnectionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCrossConnectionsCreateOrUpdateFuture.Result. +func (future *ExpressRouteCrossConnectionsCreateOrUpdateFuture) result(client ExpressRouteCrossConnectionsClient) (ercc ExpressRouteCrossConnection, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ercc.Response.Response, err = future.GetResult(sender); err == nil && ercc.Response.Response.StatusCode != http.StatusNoContent { + ercc, err = client.CreateOrUpdateResponder(ercc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsCreateOrUpdateFuture", "Result", ercc.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteCrossConnectionsListArpTableFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type ExpressRouteCrossConnectionsListArpTableFuture struct { @@ -9007,6 +10187,39 @@ type ExpressRouteCrossConnectionsListArpTableFuture struct { Result func(ExpressRouteCrossConnectionsClient) (ExpressRouteCircuitsArpTableListResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCrossConnectionsListArpTableFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCrossConnectionsListArpTableFuture.Result. 
+func (future *ExpressRouteCrossConnectionsListArpTableFuture) result(client ExpressRouteCrossConnectionsClient) (ercatlr ExpressRouteCircuitsArpTableListResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListArpTableFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsListArpTableFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ercatlr.Response.Response, err = future.GetResult(sender); err == nil && ercatlr.Response.Response.StatusCode != http.StatusNoContent { + ercatlr, err = client.ListArpTableResponder(ercatlr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListArpTableFuture", "Result", ercatlr.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteCrossConnectionsListRoutesTableFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type ExpressRouteCrossConnectionsListRoutesTableFuture struct { @@ -9016,6 +10229,39 @@ type ExpressRouteCrossConnectionsListRoutesTableFuture struct { Result func(ExpressRouteCrossConnectionsClient) (ExpressRouteCircuitsRoutesTableListResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCrossConnectionsListRoutesTableFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCrossConnectionsListRoutesTableFuture.Result. +func (future *ExpressRouteCrossConnectionsListRoutesTableFuture) result(client ExpressRouteCrossConnectionsClient) (ercrtlr ExpressRouteCircuitsRoutesTableListResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListRoutesTableFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsListRoutesTableFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ercrtlr.Response.Response, err = future.GetResult(sender); err == nil && ercrtlr.Response.Response.StatusCode != http.StatusNoContent { + ercrtlr, err = client.ListRoutesTableResponder(ercrtlr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListRoutesTableFuture", "Result", ercrtlr.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteCrossConnectionsListRoutesTableSummaryFuture an abstraction for monitoring and retrieving // the results of a long-running operation. 
type ExpressRouteCrossConnectionsListRoutesTableSummaryFuture struct { @@ -9025,16 +10271,49 @@ type ExpressRouteCrossConnectionsListRoutesTableSummaryFuture struct { Result func(ExpressRouteCrossConnectionsClient) (ExpressRouteCrossConnectionsRoutesTableSummaryListResult, error) } -// ExpressRouteCrossConnectionsRoutesTableSummaryListResult response for ListRoutesTable associated with -// the Express Route Cross Connections. -type ExpressRouteCrossConnectionsRoutesTableSummaryListResult struct { - autorest.Response `json:"-"` - // Value - A list of the routes table. - Value *[]ExpressRouteCrossConnectionRoutesTableSummary `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of results. - NextLink *string `json:"nextLink,omitempty"` -} - +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCrossConnectionsListRoutesTableSummaryFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCrossConnectionsListRoutesTableSummaryFuture.Result. +func (future *ExpressRouteCrossConnectionsListRoutesTableSummaryFuture) result(client ExpressRouteCrossConnectionsClient) (erccrtslr ExpressRouteCrossConnectionsRoutesTableSummaryListResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListRoutesTableSummaryFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsListRoutesTableSummaryFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if erccrtslr.Response.Response, err = future.GetResult(sender); err == nil && erccrtslr.Response.Response.StatusCode != http.StatusNoContent { + erccrtslr, err = client.ListRoutesTableSummaryResponder(erccrtslr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListRoutesTableSummaryFuture", "Result", erccrtslr.Response.Response, "Failure responding to request") + } + } + return +} + +// ExpressRouteCrossConnectionsRoutesTableSummaryListResult response for ListRoutesTable associated with +// the Express Route Cross Connections. +type ExpressRouteCrossConnectionsRoutesTableSummaryListResult struct { + autorest.Response `json:"-"` + // Value - A list of the routes table. + Value *[]ExpressRouteCrossConnectionRoutesTableSummary `json:"value,omitempty"` + // NextLink - READ-ONLY; The URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + // MarshalJSON is the custom marshaler for ExpressRouteCrossConnectionsRoutesTableSummaryListResult. func (erccrtslr ExpressRouteCrossConnectionsRoutesTableSummaryListResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) @@ -9053,6 +10332,39 @@ type ExpressRouteCrossConnectionsUpdateTagsFuture struct { Result func(ExpressRouteCrossConnectionsClient) (ExpressRouteCrossConnection, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *ExpressRouteCrossConnectionsUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCrossConnectionsUpdateTagsFuture.Result. +func (future *ExpressRouteCrossConnectionsUpdateTagsFuture) result(client ExpressRouteCrossConnectionsClient) (ercc ExpressRouteCrossConnection, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ercc.Response.Response, err = future.GetResult(sender); err == nil && ercc.Response.Response.StatusCode != http.StatusNoContent { + ercc, err = client.UpdateTagsResponder(ercc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsUpdateTagsFuture", "Result", ercc.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteGateway expressRoute gateway resource. type ExpressRouteGateway struct { autorest.Response `json:"-"` @@ -9222,6 +10534,39 @@ type ExpressRouteGatewaysCreateOrUpdateFuture struct { Result func(ExpressRouteGatewaysClient) (ExpressRouteGateway, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteGatewaysCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteGatewaysCreateOrUpdateFuture.Result. +func (future *ExpressRouteGatewaysCreateOrUpdateFuture) result(client ExpressRouteGatewaysClient) (erg ExpressRouteGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteGatewaysCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if erg.Response.Response, err = future.GetResult(sender); err == nil && erg.Response.Response.StatusCode != http.StatusNoContent { + erg, err = client.CreateOrUpdateResponder(erg.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysCreateOrUpdateFuture", "Result", erg.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteGatewaysDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. 
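The result methods in these hunks come in two shapes: futures that carry a resource (CreateOrUpdate, UpdateTags and the List*Table operations) re-issue a GET through a retry-decorated sender and decode the body with the matching *Responder, while Delete futures only wrap the final polling response in an autorest.Response. A minimal sketch of waiting on a delete synchronously, assuming the usual generated Delete method on ExpressRouteGatewaysClient plus the context and network imports from the sketch above:

    func deleteGateway(ctx context.Context, client network.ExpressRouteGatewaysClient, resourceGroup, gatewayName string) error {
        future, err := client.Delete(ctx, resourceGroup, gatewayName)
        if err != nil {
            return err
        }
        // WaitForCompletionRef is promoted from the embedded azure.FutureAPI and
        // polls until the long-running operation reaches a terminal state.
        if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
            return err
        }
        // Delete futures have no body to decode; result() only wraps the last
        // polling response, so the returned autorest.Response is usually discarded.
        _, err = future.Result(client)
        return err
    }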
type ExpressRouteGatewaysDeleteFuture struct { @@ -9231,6 +10576,33 @@ type ExpressRouteGatewaysDeleteFuture struct { Result func(ExpressRouteGatewaysClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteGatewaysDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteGatewaysDeleteFuture.Result. +func (future *ExpressRouteGatewaysDeleteFuture) result(client ExpressRouteGatewaysClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteGatewaysDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ExpressRouteLink expressRouteLink child resource definition. type ExpressRouteLink struct { autorest.Response `json:"-"` @@ -9826,6 +11198,39 @@ type ExpressRoutePortsCreateOrUpdateFuture struct { Result func(ExpressRoutePortsClient) (ExpressRoutePort, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRoutePortsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRoutePortsCreateOrUpdateFuture.Result. +func (future *ExpressRoutePortsCreateOrUpdateFuture) result(client ExpressRoutePortsClient) (erp ExpressRoutePort, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRoutePortsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if erp.Response.Response, err = future.GetResult(sender); err == nil && erp.Response.Response.StatusCode != http.StatusNoContent { + erp, err = client.CreateOrUpdateResponder(erp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsCreateOrUpdateFuture", "Result", erp.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRoutePortsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type ExpressRoutePortsDeleteFuture struct { @@ -9835,6 +11240,33 @@ type ExpressRoutePortsDeleteFuture struct { Result func(ExpressRoutePortsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *ExpressRoutePortsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRoutePortsDeleteFuture.Result. +func (future *ExpressRoutePortsDeleteFuture) result(client ExpressRoutePortsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRoutePortsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ExpressRoutePortsLocation definition of the ExpressRoutePorts peering location resource. type ExpressRoutePortsLocation struct { autorest.Response `json:"-"` @@ -10138,6 +11570,39 @@ type ExpressRoutePortsUpdateTagsFuture struct { Result func(ExpressRoutePortsClient) (ExpressRoutePort, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRoutePortsUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRoutePortsUpdateTagsFuture.Result. +func (future *ExpressRoutePortsUpdateTagsFuture) result(client ExpressRoutePortsClient) (erp ExpressRoutePort, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRoutePortsUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if erp.Response.Response, err = future.GetResult(sender); err == nil && erp.Response.Response.StatusCode != http.StatusNoContent { + erp, err = client.UpdateTagsResponder(erp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsUpdateTagsFuture", "Result", erp.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteServiceProvider a ExpressRouteResourceProvider object. type ExpressRouteServiceProvider struct { *ExpressRouteServiceProviderPropertiesFormat `json:"properties,omitempty"` @@ -11211,6 +12676,39 @@ type InboundNatRulesCreateOrUpdateFuture struct { Result func(InboundNatRulesClient) (InboundNatRule, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *InboundNatRulesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for InboundNatRulesCreateOrUpdateFuture.Result. 
+func (future *InboundNatRulesCreateOrUpdateFuture) result(client InboundNatRulesClient) (inr InboundNatRule, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InboundNatRulesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.InboundNatRulesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if inr.Response.Response, err = future.GetResult(sender); err == nil && inr.Response.Response.StatusCode != http.StatusNoContent { + inr, err = client.CreateOrUpdateResponder(inr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InboundNatRulesCreateOrUpdateFuture", "Result", inr.Response.Response, "Failure responding to request") + } + } + return +} + // InboundNatRulesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type InboundNatRulesDeleteFuture struct { @@ -11220,6 +12718,33 @@ type InboundNatRulesDeleteFuture struct { Result func(InboundNatRulesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *InboundNatRulesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for InboundNatRulesDeleteFuture.Result. +func (future *InboundNatRulesDeleteFuture) result(client InboundNatRulesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InboundNatRulesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.InboundNatRulesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // IntentPolicy network Intent Policy resource. type IntentPolicy struct { // Etag - Gets a unique read-only string that changes whenever the resource is updated. @@ -11736,6 +13261,39 @@ type InterfaceEndpointsCreateOrUpdateFuture struct { Result func(InterfaceEndpointsClient) (InterfaceEndpoint, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *InterfaceEndpointsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for InterfaceEndpointsCreateOrUpdateFuture.Result. 
+func (future *InterfaceEndpointsCreateOrUpdateFuture) result(client InterfaceEndpointsClient) (ie InterfaceEndpoint, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfaceEndpointsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.InterfaceEndpointsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ie.Response.Response, err = future.GetResult(sender); err == nil && ie.Response.Response.StatusCode != http.StatusNoContent { + ie, err = client.CreateOrUpdateResponder(ie.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfaceEndpointsCreateOrUpdateFuture", "Result", ie.Response.Response, "Failure responding to request") + } + } + return +} + // InterfaceEndpointsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type InterfaceEndpointsDeleteFuture struct { @@ -11745,6 +13303,33 @@ type InterfaceEndpointsDeleteFuture struct { Result func(InterfaceEndpointsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *InterfaceEndpointsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for InterfaceEndpointsDeleteFuture.Result. +func (future *InterfaceEndpointsDeleteFuture) result(client InterfaceEndpointsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfaceEndpointsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.InterfaceEndpointsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // InterfaceIPConfiguration iPConfiguration in a network interface. type InterfaceIPConfiguration struct { autorest.Response `json:"-"` @@ -12435,6 +14020,39 @@ type InterfacesCreateOrUpdateFuture struct { Result func(InterfacesClient) (Interface, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *InterfacesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for InterfacesCreateOrUpdateFuture.Result. 
+func (future *InterfacesCreateOrUpdateFuture) result(client InterfacesClient) (i Interface, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.InterfacesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if i.Response.Response, err = future.GetResult(sender); err == nil && i.Response.Response.StatusCode != http.StatusNoContent { + i, err = client.CreateOrUpdateResponder(i.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesCreateOrUpdateFuture", "Result", i.Response.Response, "Failure responding to request") + } + } + return +} + // InterfacesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type InterfacesDeleteFuture struct { @@ -12444,6 +14062,33 @@ type InterfacesDeleteFuture struct { Result func(InterfacesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *InterfacesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for InterfacesDeleteFuture.Result. +func (future *InterfacesDeleteFuture) result(client InterfacesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.InterfacesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // InterfacesGetEffectiveRouteTableFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type InterfacesGetEffectiveRouteTableFuture struct { @@ -12453,6 +14098,39 @@ type InterfacesGetEffectiveRouteTableFuture struct { Result func(InterfacesClient) (EffectiveRouteListResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *InterfacesGetEffectiveRouteTableFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for InterfacesGetEffectiveRouteTableFuture.Result. 
+func (future *InterfacesGetEffectiveRouteTableFuture) result(client InterfacesClient) (erlr EffectiveRouteListResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesGetEffectiveRouteTableFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.InterfacesGetEffectiveRouteTableFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if erlr.Response.Response, err = future.GetResult(sender); err == nil && erlr.Response.Response.StatusCode != http.StatusNoContent { + erlr, err = client.GetEffectiveRouteTableResponder(erlr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesGetEffectiveRouteTableFuture", "Result", erlr.Response.Response, "Failure responding to request") + } + } + return +} + // InterfacesListEffectiveNetworkSecurityGroupsFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type InterfacesListEffectiveNetworkSecurityGroupsFuture struct { @@ -12462,6 +14140,39 @@ type InterfacesListEffectiveNetworkSecurityGroupsFuture struct { Result func(InterfacesClient) (EffectiveNetworkSecurityGroupListResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *InterfacesListEffectiveNetworkSecurityGroupsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for InterfacesListEffectiveNetworkSecurityGroupsFuture.Result. +func (future *InterfacesListEffectiveNetworkSecurityGroupsFuture) result(client InterfacesClient) (ensglr EffectiveNetworkSecurityGroupListResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesListEffectiveNetworkSecurityGroupsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.InterfacesListEffectiveNetworkSecurityGroupsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ensglr.Response.Response, err = future.GetResult(sender); err == nil && ensglr.Response.Response.StatusCode != http.StatusNoContent { + ensglr, err = client.ListEffectiveNetworkSecurityGroupsResponder(ensglr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesListEffectiveNetworkSecurityGroupsFuture", "Result", ensglr.Response.Response, "Failure responding to request") + } + } + return +} + // InterfacesUpdateTagsFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type InterfacesUpdateTagsFuture struct { @@ -12471,6 +14182,39 @@ type InterfacesUpdateTagsFuture struct { Result func(InterfacesClient) (Interface, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *InterfacesUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for InterfacesUpdateTagsFuture.Result. +func (future *InterfacesUpdateTagsFuture) result(client InterfacesClient) (i Interface, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.InterfacesUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if i.Response.Response, err = future.GetResult(sender); err == nil && i.Response.Response.StatusCode != http.StatusNoContent { + i, err = client.UpdateTagsResponder(i.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesUpdateTagsFuture", "Result", i.Response.Response, "Failure responding to request") + } + } + return +} + // InterfaceTapConfiguration tap configuration in a Network Interface type InterfaceTapConfiguration struct { autorest.Response `json:"-"` @@ -12759,6 +14503,39 @@ type InterfaceTapConfigurationsCreateOrUpdateFuture struct { Result func(InterfaceTapConfigurationsClient) (InterfaceTapConfiguration, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *InterfaceTapConfigurationsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for InterfaceTapConfigurationsCreateOrUpdateFuture.Result. +func (future *InterfaceTapConfigurationsCreateOrUpdateFuture) result(client InterfaceTapConfigurationsClient) (itc InterfaceTapConfiguration, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfaceTapConfigurationsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.InterfaceTapConfigurationsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if itc.Response.Response, err = future.GetResult(sender); err == nil && itc.Response.Response.StatusCode != http.StatusNoContent { + itc, err = client.CreateOrUpdateResponder(itc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfaceTapConfigurationsCreateOrUpdateFuture", "Result", itc.Response.Response, "Failure responding to request") + } + } + return +} + // InterfaceTapConfigurationsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. 
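Every resource-returning result method decorates its final GET with autorest.DoRetryForStatusCodes, driven by the RetryAttempts and RetryDuration fields of the client's embedded autorest.Client and by the package-level autorest.StatusCodesForRetry set, and it skips the Responder entirely when the service answers 204 No Content. A sketch of where those knobs live, with placeholder values, an authorizer obtained elsewhere, and the time, go-autorest and network imports assumed:

    func newInterfacesClient(authorizer autorest.Authorizer) network.InterfacesClient {
        // Placeholder subscription ID; real callers pass their own.
        client := network.NewInterfacesClient("00000000-0000-0000-0000-000000000000")
        client.Authorizer = authorizer
        client.RetryAttempts = 5                // consulted by result() when re-fetching the resource
        client.RetryDuration = 30 * time.Second // delay between those retries
        return client
    }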
type InterfaceTapConfigurationsDeleteFuture struct { @@ -12768,6 +14545,33 @@ type InterfaceTapConfigurationsDeleteFuture struct { Result func(InterfaceTapConfigurationsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *InterfaceTapConfigurationsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for InterfaceTapConfigurationsDeleteFuture.Result. +func (future *InterfaceTapConfigurationsDeleteFuture) result(client InterfaceTapConfigurationsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfaceTapConfigurationsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.InterfaceTapConfigurationsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // IPAddressAvailabilityResult response for CheckIPAddressAvailability API service call type IPAddressAvailabilityResult struct { autorest.Response `json:"-"` @@ -15486,6 +17290,39 @@ type LoadBalancersCreateOrUpdateFuture struct { Result func(LoadBalancersClient) (LoadBalancer, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *LoadBalancersCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for LoadBalancersCreateOrUpdateFuture.Result. +func (future *LoadBalancersCreateOrUpdateFuture) result(client LoadBalancersClient) (lb LoadBalancer, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.LoadBalancersCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if lb.Response.Response, err = future.GetResult(sender); err == nil && lb.Response.Response.StatusCode != http.StatusNoContent { + lb, err = client.CreateOrUpdateResponder(lb.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersCreateOrUpdateFuture", "Result", lb.Response.Response, "Failure responding to request") + } + } + return +} + // LoadBalancersDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type LoadBalancersDeleteFuture struct { @@ -15495,6 +17332,33 @@ type LoadBalancersDeleteFuture struct { Result func(LoadBalancersClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *LoadBalancersDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for LoadBalancersDeleteFuture.Result. +func (future *LoadBalancersDeleteFuture) result(client LoadBalancersClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.LoadBalancersDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // LoadBalancerSku SKU of a load balancer type LoadBalancerSku struct { // Name - Name of a load balancer SKU. Possible values include: 'LoadBalancerSkuNameBasic', 'LoadBalancerSkuNameStandard' @@ -15510,13 +17374,46 @@ type LoadBalancersUpdateTagsFuture struct { Result func(LoadBalancersClient) (LoadBalancer, error) } -// LoadBalancingRule a load balancing rule for a load balancer. -type LoadBalancingRule struct { - autorest.Response `json:"-"` - // LoadBalancingRulePropertiesFormat - Properties of load balancer load balancing rule. - *LoadBalancingRulePropertiesFormat `json:"properties,omitempty"` - // Name - The name of the resource that is unique within a resource group. This name can be used to access the resource. - Name *string `json:"name,omitempty"` +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *LoadBalancersUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for LoadBalancersUpdateTagsFuture.Result. +func (future *LoadBalancersUpdateTagsFuture) result(client LoadBalancersClient) (lb LoadBalancer, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.LoadBalancersUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if lb.Response.Response, err = future.GetResult(sender); err == nil && lb.Response.Response.StatusCode != http.StatusNoContent { + lb, err = client.UpdateTagsResponder(lb.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersUpdateTagsFuture", "Result", lb.Response.Response, "Failure responding to request") + } + } + return +} + +// LoadBalancingRule a load balancing rule for a load balancer. +type LoadBalancingRule struct { + autorest.Response `json:"-"` + // LoadBalancingRulePropertiesFormat - Properties of load balancer load balancing rule. + *LoadBalancingRulePropertiesFormat `json:"properties,omitempty"` + // Name - The name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` // Etag - A unique read-only string that changes whenever the resource is updated. 
Etag *string `json:"etag,omitempty"` // ID - Resource ID. @@ -15948,6 +17845,39 @@ type LocalNetworkGatewaysCreateOrUpdateFuture struct { Result func(LocalNetworkGatewaysClient) (LocalNetworkGateway, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *LocalNetworkGatewaysCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for LocalNetworkGatewaysCreateOrUpdateFuture.Result. +func (future *LocalNetworkGatewaysCreateOrUpdateFuture) result(client LocalNetworkGatewaysClient) (lng LocalNetworkGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.LocalNetworkGatewaysCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if lng.Response.Response, err = future.GetResult(sender); err == nil && lng.Response.Response.StatusCode != http.StatusNoContent { + lng, err = client.CreateOrUpdateResponder(lng.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysCreateOrUpdateFuture", "Result", lng.Response.Response, "Failure responding to request") + } + } + return +} + // LocalNetworkGatewaysDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type LocalNetworkGatewaysDeleteFuture struct { @@ -15957,6 +17887,33 @@ type LocalNetworkGatewaysDeleteFuture struct { Result func(LocalNetworkGatewaysClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *LocalNetworkGatewaysDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for LocalNetworkGatewaysDeleteFuture.Result. +func (future *LocalNetworkGatewaysDeleteFuture) result(client LocalNetworkGatewaysClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.LocalNetworkGatewaysDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // LocalNetworkGatewaysUpdateTagsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type LocalNetworkGatewaysUpdateTagsFuture struct { @@ -15966,6 +17923,39 @@ type LocalNetworkGatewaysUpdateTagsFuture struct { Result func(LocalNetworkGatewaysClient) (LocalNetworkGateway, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *LocalNetworkGatewaysUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for LocalNetworkGatewaysUpdateTagsFuture.Result. +func (future *LocalNetworkGatewaysUpdateTagsFuture) result(client LocalNetworkGatewaysClient) (lng LocalNetworkGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.LocalNetworkGatewaysUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if lng.Response.Response, err = future.GetResult(sender); err == nil && lng.Response.Response.StatusCode != http.StatusNoContent { + lng, err = client.UpdateTagsResponder(lng.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysUpdateTagsFuture", "Result", lng.Response.Response, "Failure responding to request") + } + } + return +} + // LogSpecification description of logging specification. type LogSpecification struct { // Name - The name of the specification. @@ -16620,6 +18610,39 @@ type P2sVpnGatewaysCreateOrUpdateFuture struct { Result func(P2sVpnGatewaysClient) (P2SVpnGateway, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *P2sVpnGatewaysCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for P2sVpnGatewaysCreateOrUpdateFuture.Result. +func (future *P2sVpnGatewaysCreateOrUpdateFuture) result(client P2sVpnGatewaysClient) (pvg P2SVpnGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.P2sVpnGatewaysCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if pvg.Response.Response, err = future.GetResult(sender); err == nil && pvg.Response.Response.StatusCode != http.StatusNoContent { + pvg, err = client.CreateOrUpdateResponder(pvg.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysCreateOrUpdateFuture", "Result", pvg.Response.Response, "Failure responding to request") + } + } + return +} + // P2sVpnGatewaysDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type P2sVpnGatewaysDeleteFuture struct { @@ -16629,6 +18652,33 @@ type P2sVpnGatewaysDeleteFuture struct { Result func(P2sVpnGatewaysClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *P2sVpnGatewaysDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for P2sVpnGatewaysDeleteFuture.Result. +func (future *P2sVpnGatewaysDeleteFuture) result(client P2sVpnGatewaysClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.P2sVpnGatewaysDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // P2sVpnGatewaysGenerateVpnProfileFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type P2sVpnGatewaysGenerateVpnProfileFuture struct { @@ -16638,6 +18688,39 @@ type P2sVpnGatewaysGenerateVpnProfileFuture struct { Result func(P2sVpnGatewaysClient) (VpnProfileResponse, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *P2sVpnGatewaysGenerateVpnProfileFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for P2sVpnGatewaysGenerateVpnProfileFuture.Result. +func (future *P2sVpnGatewaysGenerateVpnProfileFuture) result(client P2sVpnGatewaysClient) (vpr VpnProfileResponse, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysGenerateVpnProfileFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.P2sVpnGatewaysGenerateVpnProfileFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vpr.Response.Response, err = future.GetResult(sender); err == nil && vpr.Response.Response.StatusCode != http.StatusNoContent { + vpr, err = client.GenerateVpnProfileResponder(vpr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysGenerateVpnProfileFuture", "Result", vpr.Response.Response, "Failure responding to request") + } + } + return +} + // P2sVpnGatewaysUpdateTagsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type P2sVpnGatewaysUpdateTagsFuture struct { @@ -16647,6 +18730,39 @@ type P2sVpnGatewaysUpdateTagsFuture struct { Result func(P2sVpnGatewaysClient) (P2SVpnGateway, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *P2sVpnGatewaysUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for P2sVpnGatewaysUpdateTagsFuture.Result. 
+func (future *P2sVpnGatewaysUpdateTagsFuture) result(client P2sVpnGatewaysClient) (pvg P2SVpnGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.P2sVpnGatewaysUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if pvg.Response.Response, err = future.GetResult(sender); err == nil && pvg.Response.Response.StatusCode != http.StatusNoContent { + pvg, err = client.UpdateTagsResponder(pvg.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysUpdateTagsFuture", "Result", pvg.Response.Response, "Failure responding to request") + } + } + return +} + // P2SVpnProfileParameters vpn Client Parameters for package generation type P2SVpnProfileParameters struct { // AuthenticationMethod - VPN client Authentication Method. Possible values are: 'EAPTLS' and 'EAPMSCHAPv2'. Possible values include: 'EAPTLS', 'EAPMSCHAPv2' @@ -17004,6 +19120,39 @@ type P2sVpnServerConfigurationsCreateOrUpdateFuture struct { Result func(P2sVpnServerConfigurationsClient) (P2SVpnServerConfiguration, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *P2sVpnServerConfigurationsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for P2sVpnServerConfigurationsCreateOrUpdateFuture.Result. +func (future *P2sVpnServerConfigurationsCreateOrUpdateFuture) result(client P2sVpnServerConfigurationsClient) (pvsc P2SVpnServerConfiguration, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.P2sVpnServerConfigurationsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.P2sVpnServerConfigurationsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if pvsc.Response.Response, err = future.GetResult(sender); err == nil && pvsc.Response.Response.StatusCode != http.StatusNoContent { + pvsc, err = client.CreateOrUpdateResponder(pvsc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.P2sVpnServerConfigurationsCreateOrUpdateFuture", "Result", pvsc.Response.Response, "Failure responding to request") + } + } + return +} + // P2sVpnServerConfigurationsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type P2sVpnServerConfigurationsDeleteFuture struct { @@ -17013,6 +19162,33 @@ type P2sVpnServerConfigurationsDeleteFuture struct { Result func(P2sVpnServerConfigurationsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *P2sVpnServerConfigurationsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for P2sVpnServerConfigurationsDeleteFuture.Result. +func (future *P2sVpnServerConfigurationsDeleteFuture) result(client P2sVpnServerConfigurationsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.P2sVpnServerConfigurationsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.P2sVpnServerConfigurationsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // P2SVpnServerConfigVpnClientRevokedCertificate VPN client revoked certificate of // P2SVpnServerConfiguration. type P2SVpnServerConfigVpnClientRevokedCertificate struct { @@ -17401,6 +19577,39 @@ type PacketCapturesCreateFuture struct { Result func(PacketCapturesClient) (PacketCaptureResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PacketCapturesCreateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PacketCapturesCreateFuture.Result. +func (future *PacketCapturesCreateFuture) result(client PacketCapturesClient) (pcr PacketCaptureResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PacketCapturesCreateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PacketCapturesCreateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if pcr.Response.Response, err = future.GetResult(sender); err == nil && pcr.Response.Response.StatusCode != http.StatusNoContent { + pcr, err = client.CreateResponder(pcr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PacketCapturesCreateFuture", "Result", pcr.Response.Response, "Failure responding to request") + } + } + return +} + // PacketCapturesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type PacketCapturesDeleteFuture struct { @@ -17410,6 +19619,33 @@ type PacketCapturesDeleteFuture struct { Result func(PacketCapturesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PacketCapturesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PacketCapturesDeleteFuture.Result. 
+func (future *PacketCapturesDeleteFuture) result(client PacketCapturesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PacketCapturesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PacketCapturesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // PacketCapturesGetStatusFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type PacketCapturesGetStatusFuture struct { @@ -17419,6 +19655,39 @@ type PacketCapturesGetStatusFuture struct { Result func(PacketCapturesClient) (PacketCaptureQueryStatusResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PacketCapturesGetStatusFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PacketCapturesGetStatusFuture.Result. +func (future *PacketCapturesGetStatusFuture) result(client PacketCapturesClient) (pcqsr PacketCaptureQueryStatusResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PacketCapturesGetStatusFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PacketCapturesGetStatusFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if pcqsr.Response.Response, err = future.GetResult(sender); err == nil && pcqsr.Response.Response.StatusCode != http.StatusNoContent { + pcqsr, err = client.GetStatusResponder(pcqsr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PacketCapturesGetStatusFuture", "Result", pcqsr.Response.Response, "Failure responding to request") + } + } + return +} + // PacketCapturesStopFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type PacketCapturesStopFuture struct { @@ -17428,6 +19697,33 @@ type PacketCapturesStopFuture struct { Result func(PacketCapturesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PacketCapturesStopFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PacketCapturesStopFuture.Result. +func (future *PacketCapturesStopFuture) result(client PacketCapturesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PacketCapturesStopFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PacketCapturesStopFuture") + return + } + ar.Response = future.Response() + return +} + // PacketCaptureStorageLocation describes the storage location for a packet capture session. 
type PacketCaptureStorageLocation struct { // StorageID - The ID of the storage account to save the packet capture session. Required if no local file path is provided. @@ -18344,6 +20640,33 @@ type ProfilesDeleteFuture struct { Result func(ProfilesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ProfilesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ProfilesDeleteFuture.Result. +func (future *ProfilesDeleteFuture) result(client ProfilesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ProfilesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ProfilesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ProtocolConfiguration configuration of the protocol. type ProtocolConfiguration struct { HTTPConfiguration *HTTPConfiguration `json:"HTTPConfiguration,omitempty"` @@ -18526,6 +20849,39 @@ type PublicIPAddressesCreateOrUpdateFuture struct { Result func(PublicIPAddressesClient) (PublicIPAddress, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PublicIPAddressesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PublicIPAddressesCreateOrUpdateFuture.Result. +func (future *PublicIPAddressesCreateOrUpdateFuture) result(client PublicIPAddressesClient) (pia PublicIPAddress, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PublicIPAddressesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if pia.Response.Response, err = future.GetResult(sender); err == nil && pia.Response.Response.StatusCode != http.StatusNoContent { + pia, err = client.CreateOrUpdateResponder(pia.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesCreateOrUpdateFuture", "Result", pia.Response.Response, "Failure responding to request") + } + } + return +} + // PublicIPAddressesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type PublicIPAddressesDeleteFuture struct { @@ -18535,6 +20891,33 @@ type PublicIPAddressesDeleteFuture struct { Result func(PublicIPAddressesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *PublicIPAddressesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PublicIPAddressesDeleteFuture.Result. +func (future *PublicIPAddressesDeleteFuture) result(client PublicIPAddressesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PublicIPAddressesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // PublicIPAddressesUpdateTagsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type PublicIPAddressesUpdateTagsFuture struct { @@ -18544,6 +20927,39 @@ type PublicIPAddressesUpdateTagsFuture struct { Result func(PublicIPAddressesClient) (PublicIPAddress, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PublicIPAddressesUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PublicIPAddressesUpdateTagsFuture.Result. +func (future *PublicIPAddressesUpdateTagsFuture) result(client PublicIPAddressesClient) (pia PublicIPAddress, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PublicIPAddressesUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if pia.Response.Response, err = future.GetResult(sender); err == nil && pia.Response.Response.StatusCode != http.StatusNoContent { + pia, err = client.UpdateTagsResponder(pia.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesUpdateTagsFuture", "Result", pia.Response.Response, "Failure responding to request") + } + } + return +} + // PublicIPAddressListResult response for ListPublicIpAddresses API service call. type PublicIPAddressListResult struct { autorest.Response `json:"-"` @@ -18926,6 +21342,39 @@ type PublicIPPrefixesCreateOrUpdateFuture struct { Result func(PublicIPPrefixesClient) (PublicIPPrefix, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PublicIPPrefixesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PublicIPPrefixesCreateOrUpdateFuture.Result. 
+func (future *PublicIPPrefixesCreateOrUpdateFuture) result(client PublicIPPrefixesClient) (pip PublicIPPrefix, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PublicIPPrefixesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if pip.Response.Response, err = future.GetResult(sender); err == nil && pip.Response.Response.StatusCode != http.StatusNoContent { + pip, err = client.CreateOrUpdateResponder(pip.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesCreateOrUpdateFuture", "Result", pip.Response.Response, "Failure responding to request") + } + } + return +} + // PublicIPPrefixesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type PublicIPPrefixesDeleteFuture struct { @@ -18935,6 +21384,33 @@ type PublicIPPrefixesDeleteFuture struct { Result func(PublicIPPrefixesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PublicIPPrefixesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PublicIPPrefixesDeleteFuture.Result. +func (future *PublicIPPrefixesDeleteFuture) result(client PublicIPPrefixesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PublicIPPrefixesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // PublicIPPrefixesUpdateTagsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type PublicIPPrefixesUpdateTagsFuture struct { @@ -18944,6 +21420,39 @@ type PublicIPPrefixesUpdateTagsFuture struct { Result func(PublicIPPrefixesClient) (PublicIPPrefix, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PublicIPPrefixesUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PublicIPPrefixesUpdateTagsFuture.Result. 
+func (future *PublicIPPrefixesUpdateTagsFuture) result(client PublicIPPrefixesClient) (pip PublicIPPrefix, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PublicIPPrefixesUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if pip.Response.Response, err = future.GetResult(sender); err == nil && pip.Response.Response.StatusCode != http.StatusNoContent { + pip, err = client.UpdateTagsResponder(pip.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesUpdateTagsFuture", "Result", pip.Response.Response, "Failure responding to request") + } + } + return +} + // PublicIPPrefixListResult response for ListPublicIpPrefixes API service call. type PublicIPPrefixListResult struct { autorest.Response `json:"-"` @@ -19969,6 +22478,39 @@ type RouteFilterRulesCreateOrUpdateFuture struct { Result func(RouteFilterRulesClient) (RouteFilterRule, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RouteFilterRulesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RouteFilterRulesCreateOrUpdateFuture.Result. +func (future *RouteFilterRulesCreateOrUpdateFuture) result(client RouteFilterRulesClient) (rfr RouteFilterRule, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFilterRulesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.RouteFilterRulesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if rfr.Response.Response, err = future.GetResult(sender); err == nil && rfr.Response.Response.StatusCode != http.StatusNoContent { + rfr, err = client.CreateOrUpdateResponder(rfr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFilterRulesCreateOrUpdateFuture", "Result", rfr.Response.Response, "Failure responding to request") + } + } + return +} + // RouteFilterRulesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type RouteFilterRulesDeleteFuture struct { @@ -19978,6 +22520,33 @@ type RouteFilterRulesDeleteFuture struct { Result func(RouteFilterRulesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RouteFilterRulesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RouteFilterRulesDeleteFuture.Result. 
+func (future *RouteFilterRulesDeleteFuture) result(client RouteFilterRulesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFilterRulesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.RouteFilterRulesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // RouteFilterRulesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type RouteFilterRulesUpdateFuture struct { @@ -19987,6 +22556,39 @@ type RouteFilterRulesUpdateFuture struct { Result func(RouteFilterRulesClient) (RouteFilterRule, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RouteFilterRulesUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RouteFilterRulesUpdateFuture.Result. +func (future *RouteFilterRulesUpdateFuture) result(client RouteFilterRulesClient) (rfr RouteFilterRule, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFilterRulesUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.RouteFilterRulesUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if rfr.Response.Response, err = future.GetResult(sender); err == nil && rfr.Response.Response.StatusCode != http.StatusNoContent { + rfr, err = client.UpdateResponder(rfr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFilterRulesUpdateFuture", "Result", rfr.Response.Response, "Failure responding to request") + } + } + return +} + // RouteFiltersCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type RouteFiltersCreateOrUpdateFuture struct { @@ -19996,6 +22598,39 @@ type RouteFiltersCreateOrUpdateFuture struct { Result func(RouteFiltersClient) (RouteFilter, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RouteFiltersCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RouteFiltersCreateOrUpdateFuture.Result. 
+func (future *RouteFiltersCreateOrUpdateFuture) result(client RouteFiltersClient) (rf RouteFilter, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFiltersCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.RouteFiltersCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if rf.Response.Response, err = future.GetResult(sender); err == nil && rf.Response.Response.StatusCode != http.StatusNoContent { + rf, err = client.CreateOrUpdateResponder(rf.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFiltersCreateOrUpdateFuture", "Result", rf.Response.Response, "Failure responding to request") + } + } + return +} + // RouteFiltersDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type RouteFiltersDeleteFuture struct { @@ -20005,6 +22640,33 @@ type RouteFiltersDeleteFuture struct { Result func(RouteFiltersClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RouteFiltersDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RouteFiltersDeleteFuture.Result. +func (future *RouteFiltersDeleteFuture) result(client RouteFiltersClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFiltersDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.RouteFiltersDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // RouteFiltersUpdateFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type RouteFiltersUpdateFuture struct { @@ -20014,6 +22676,39 @@ type RouteFiltersUpdateFuture struct { Result func(RouteFiltersClient) (RouteFilter, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RouteFiltersUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RouteFiltersUpdateFuture.Result. 
+func (future *RouteFiltersUpdateFuture) result(client RouteFiltersClient) (rf RouteFilter, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFiltersUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.RouteFiltersUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if rf.Response.Response, err = future.GetResult(sender); err == nil && rf.Response.Response.StatusCode != http.StatusNoContent { + rf, err = client.UpdateResponder(rf.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFiltersUpdateFuture", "Result", rf.Response.Response, "Failure responding to request") + } + } + return +} + // RouteListResult response for the ListRoute API service call type RouteListResult struct { autorest.Response `json:"-"` @@ -20194,6 +22889,39 @@ type RoutesCreateOrUpdateFuture struct { Result func(RoutesClient) (Route, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RoutesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RoutesCreateOrUpdateFuture.Result. +func (future *RoutesCreateOrUpdateFuture) result(client RoutesClient) (r Route, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.RoutesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if r.Response.Response, err = future.GetResult(sender); err == nil && r.Response.Response.StatusCode != http.StatusNoContent { + r, err = client.CreateOrUpdateResponder(r.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesCreateOrUpdateFuture", "Result", r.Response.Response, "Failure responding to request") + } + } + return +} + // RoutesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. type RoutesDeleteFuture struct { azure.FutureAPI @@ -20202,6 +22930,33 @@ type RoutesDeleteFuture struct { Result func(RoutesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RoutesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RoutesDeleteFuture.Result. 
+func (future *RoutesDeleteFuture) result(client RoutesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.RoutesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // RouteTable route table resource. type RouteTable struct { autorest.Response `json:"-"` @@ -20515,6 +23270,39 @@ type RouteTablesCreateOrUpdateFuture struct { Result func(RouteTablesClient) (RouteTable, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RouteTablesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RouteTablesCreateOrUpdateFuture.Result. +func (future *RouteTablesCreateOrUpdateFuture) result(client RouteTablesClient) (rt RouteTable, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.RouteTablesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if rt.Response.Response, err = future.GetResult(sender); err == nil && rt.Response.Response.StatusCode != http.StatusNoContent { + rt, err = client.CreateOrUpdateResponder(rt.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesCreateOrUpdateFuture", "Result", rt.Response.Response, "Failure responding to request") + } + } + return +} + // RouteTablesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type RouteTablesDeleteFuture struct { @@ -20524,6 +23312,33 @@ type RouteTablesDeleteFuture struct { Result func(RouteTablesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RouteTablesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RouteTablesDeleteFuture.Result. +func (future *RouteTablesDeleteFuture) result(client RouteTablesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.RouteTablesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // RouteTablesUpdateTagsFuture an abstraction for monitoring and retrieving the results of a long-running // operation. 
type RouteTablesUpdateTagsFuture struct { @@ -20533,6 +23348,39 @@ type RouteTablesUpdateTagsFuture struct { Result func(RouteTablesClient) (RouteTable, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RouteTablesUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RouteTablesUpdateTagsFuture.Result. +func (future *RouteTablesUpdateTagsFuture) result(client RouteTablesClient) (rt RouteTable, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.RouteTablesUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if rt.Response.Response, err = future.GetResult(sender); err == nil && rt.Response.Response.StatusCode != http.StatusNoContent { + rt, err = client.UpdateTagsResponder(rt.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesUpdateTagsFuture", "Result", rt.Response.Response, "Failure responding to request") + } + } + return +} + // SecurityGroup networkSecurityGroup resource. type SecurityGroup struct { autorest.Response `json:"-"` @@ -20877,6 +23725,39 @@ type SecurityGroupsCreateOrUpdateFuture struct { Result func(SecurityGroupsClient) (SecurityGroup, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SecurityGroupsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SecurityGroupsCreateOrUpdateFuture.Result. +func (future *SecurityGroupsCreateOrUpdateFuture) result(client SecurityGroupsClient) (sg SecurityGroup, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.SecurityGroupsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if sg.Response.Response, err = future.GetResult(sender); err == nil && sg.Response.Response.StatusCode != http.StatusNoContent { + sg, err = client.CreateOrUpdateResponder(sg.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsCreateOrUpdateFuture", "Result", sg.Response.Response, "Failure responding to request") + } + } + return +} + // SecurityGroupsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. 
type SecurityGroupsDeleteFuture struct { @@ -20886,6 +23767,33 @@ type SecurityGroupsDeleteFuture struct { Result func(SecurityGroupsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SecurityGroupsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SecurityGroupsDeleteFuture.Result. +func (future *SecurityGroupsDeleteFuture) result(client SecurityGroupsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.SecurityGroupsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // SecurityGroupsUpdateTagsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type SecurityGroupsUpdateTagsFuture struct { @@ -20895,6 +23803,39 @@ type SecurityGroupsUpdateTagsFuture struct { Result func(SecurityGroupsClient) (SecurityGroup, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SecurityGroupsUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SecurityGroupsUpdateTagsFuture.Result. +func (future *SecurityGroupsUpdateTagsFuture) result(client SecurityGroupsClient) (sg SecurityGroup, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.SecurityGroupsUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if sg.Response.Response, err = future.GetResult(sender); err == nil && sg.Response.Response.StatusCode != http.StatusNoContent { + sg, err = client.UpdateTagsResponder(sg.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsUpdateTagsFuture", "Result", sg.Response.Response, "Failure responding to request") + } + } + return +} + // SecurityGroupViewParameters parameters that define the VM to check security groups for. type SecurityGroupViewParameters struct { // TargetResourceID - ID of the target VM. @@ -21205,6 +24146,39 @@ type SecurityRulesCreateOrUpdateFuture struct { Result func(SecurityRulesClient) (SecurityRule, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SecurityRulesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SecurityRulesCreateOrUpdateFuture.Result. 
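Delete-style futures such as SecurityGroupsDeleteFuture above differ only in their result method: there is no body to decode, so result wraps the final response. A matching caller-side sketch under the same assumptions as the earlier example (placeholder names, assumed 2019-07-01 package path):

package networkexample

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network"
)

// deleteNSG is a hedged sketch of driving a delete-style future; Result returns
// only an autorest.Response, mirroring the result() implementation above.
func deleteNSG(ctx context.Context, client network.SecurityGroupsClient) error {
	future, err := client.Delete(ctx, "example-rg", "example-nsg")
	if err != nil {
		return err
	}
	// Wait for the deletion to reach a terminal state, then surface any
	// polling error; the final HTTP response is discarded here.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return err
	}
	_, err = future.Result(client)
	return err
}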
+func (future *SecurityRulesCreateOrUpdateFuture) result(client SecurityRulesClient) (sr SecurityRule, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityRulesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.SecurityRulesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if sr.Response.Response, err = future.GetResult(sender); err == nil && sr.Response.Response.StatusCode != http.StatusNoContent { + sr, err = client.CreateOrUpdateResponder(sr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityRulesCreateOrUpdateFuture", "Result", sr.Response.Response, "Failure responding to request") + } + } + return +} + // SecurityRulesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type SecurityRulesDeleteFuture struct { @@ -21214,6 +24188,33 @@ type SecurityRulesDeleteFuture struct { Result func(SecurityRulesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SecurityRulesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SecurityRulesDeleteFuture.Result. +func (future *SecurityRulesDeleteFuture) result(client SecurityRulesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityRulesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.SecurityRulesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // SecurityRulesEvaluationResult network security rules evaluation result. type SecurityRulesEvaluationResult struct { // Name - Name of the network security rule. @@ -21361,6 +24362,39 @@ type ServiceEndpointPoliciesCreateOrUpdateFuture struct { Result func(ServiceEndpointPoliciesClient) (ServiceEndpointPolicy, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ServiceEndpointPoliciesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ServiceEndpointPoliciesCreateOrUpdateFuture.Result. 
+func (future *ServiceEndpointPoliciesCreateOrUpdateFuture) result(client ServiceEndpointPoliciesClient) (sep ServiceEndpointPolicy, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ServiceEndpointPoliciesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if sep.Response.Response, err = future.GetResult(sender); err == nil && sep.Response.Response.StatusCode != http.StatusNoContent { + sep, err = client.CreateOrUpdateResponder(sep.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesCreateOrUpdateFuture", "Result", sep.Response.Response, "Failure responding to request") + } + } + return +} + // ServiceEndpointPoliciesDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ServiceEndpointPoliciesDeleteFuture struct { @@ -21370,6 +24404,33 @@ type ServiceEndpointPoliciesDeleteFuture struct { Result func(ServiceEndpointPoliciesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ServiceEndpointPoliciesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ServiceEndpointPoliciesDeleteFuture.Result. +func (future *ServiceEndpointPoliciesDeleteFuture) result(client ServiceEndpointPoliciesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ServiceEndpointPoliciesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ServiceEndpointPoliciesUpdateFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ServiceEndpointPoliciesUpdateFuture struct { @@ -21379,6 +24440,39 @@ type ServiceEndpointPoliciesUpdateFuture struct { Result func(ServiceEndpointPoliciesClient) (ServiceEndpointPolicy, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ServiceEndpointPoliciesUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ServiceEndpointPoliciesUpdateFuture.Result. 
+func (future *ServiceEndpointPoliciesUpdateFuture) result(client ServiceEndpointPoliciesClient) (sep ServiceEndpointPolicy, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ServiceEndpointPoliciesUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if sep.Response.Response, err = future.GetResult(sender); err == nil && sep.Response.Response.StatusCode != http.StatusNoContent { + sep, err = client.UpdateResponder(sep.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesUpdateFuture", "Result", sep.Response.Response, "Failure responding to request") + } + } + return +} + // ServiceEndpointPolicy service End point policy resource. type ServiceEndpointPolicy struct { autorest.Response `json:"-"` @@ -21776,6 +24870,39 @@ type ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture struct { Result func(ServiceEndpointPolicyDefinitionsClient) (ServiceEndpointPolicyDefinition, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture.Result. +func (future *ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture) result(client ServiceEndpointPolicyDefinitionsClient) (sepd ServiceEndpointPolicyDefinition, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if sepd.Response.Response, err = future.GetResult(sender); err == nil && sepd.Response.Response.StatusCode != http.StatusNoContent { + sepd, err = client.CreateOrUpdateResponder(sepd.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture", "Result", sepd.Response.Response, "Failure responding to request") + } + } + return +} + // ServiceEndpointPolicyDefinitionsDeleteFuture an abstraction for monitoring and retrieving the results of // a long-running operation. type ServiceEndpointPolicyDefinitionsDeleteFuture struct { @@ -21785,6 +24912,33 @@ type ServiceEndpointPolicyDefinitionsDeleteFuture struct { Result func(ServiceEndpointPolicyDefinitionsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *ServiceEndpointPolicyDefinitionsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ServiceEndpointPolicyDefinitionsDeleteFuture.Result. +func (future *ServiceEndpointPolicyDefinitionsDeleteFuture) result(client ServiceEndpointPolicyDefinitionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ServiceEndpointPolicyDefinitionsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ServiceEndpointPolicyListResult response for ListServiceEndpointPolicies API service call. type ServiceEndpointPolicyListResult struct { autorest.Response `json:"-"` @@ -22327,13 +25481,73 @@ type SubnetsCreateOrUpdateFuture struct { Result func(SubnetsClient) (Subnet, error) } -// SubnetsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type SubnetsDeleteFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(SubnetsClient) (autorest.Response, error) +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SubnetsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SubnetsCreateOrUpdateFuture.Result. +func (future *SubnetsCreateOrUpdateFuture) result(client SubnetsClient) (s Subnet, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.SubnetsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if s.Response.Response, err = future.GetResult(sender); err == nil && s.Response.Response.StatusCode != http.StatusNoContent { + s, err = client.CreateOrUpdateResponder(s.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsCreateOrUpdateFuture", "Result", s.Response.Response, "Failure responding to request") + } + } + return +} + +// SubnetsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type SubnetsDeleteFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(SubnetsClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *SubnetsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SubnetsDeleteFuture.Result. +func (future *SubnetsDeleteFuture) result(client SubnetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.SubnetsDeleteFuture") + return + } + ar.Response = future.Response() + return } // SubnetsPrepareNetworkPoliciesFuture an abstraction for monitoring and retrieving the results of a @@ -22345,6 +25559,33 @@ type SubnetsPrepareNetworkPoliciesFuture struct { Result func(SubnetsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SubnetsPrepareNetworkPoliciesFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SubnetsPrepareNetworkPoliciesFuture.Result. +func (future *SubnetsPrepareNetworkPoliciesFuture) result(client SubnetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsPrepareNetworkPoliciesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.SubnetsPrepareNetworkPoliciesFuture") + return + } + ar.Response = future.Response() + return +} + // SubResource reference to another subresource. type SubResource struct { // ID - Resource ID. @@ -22942,6 +26183,39 @@ type VirtualHubsCreateOrUpdateFuture struct { Result func(VirtualHubsClient) (VirtualHub, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualHubsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualHubsCreateOrUpdateFuture.Result. 
+func (future *VirtualHubsCreateOrUpdateFuture) result(client VirtualHubsClient) (vh VirtualHub, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualHubsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualHubsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vh.Response.Response, err = future.GetResult(sender); err == nil && vh.Response.Response.StatusCode != http.StatusNoContent { + vh, err = client.CreateOrUpdateResponder(vh.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualHubsCreateOrUpdateFuture", "Result", vh.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualHubsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VirtualHubsDeleteFuture struct { @@ -22951,6 +26225,33 @@ type VirtualHubsDeleteFuture struct { Result func(VirtualHubsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualHubsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualHubsDeleteFuture.Result. +func (future *VirtualHubsDeleteFuture) result(client VirtualHubsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualHubsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualHubsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualHubsUpdateTagsFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VirtualHubsUpdateTagsFuture struct { @@ -22960,6 +26261,39 @@ type VirtualHubsUpdateTagsFuture struct { Result func(VirtualHubsClient) (VirtualHub, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualHubsUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualHubsUpdateTagsFuture.Result. 
+func (future *VirtualHubsUpdateTagsFuture) result(client VirtualHubsClient) (vh VirtualHub, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualHubsUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualHubsUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vh.Response.Response, err = future.GetResult(sender); err == nil && vh.Response.Response.StatusCode != http.StatusNoContent { + vh, err = client.UpdateTagsResponder(vh.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualHubsUpdateTagsFuture", "Result", vh.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetwork virtual Network resource. type VirtualNetwork struct { autorest.Response `json:"-"` @@ -23797,6 +27131,39 @@ type VirtualNetworkGatewayConnectionsCreateOrUpdateFuture struct { Result func(VirtualNetworkGatewayConnectionsClient) (VirtualNetworkGatewayConnection, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewayConnectionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewayConnectionsCreateOrUpdateFuture.Result. +func (future *VirtualNetworkGatewayConnectionsCreateOrUpdateFuture) result(client VirtualNetworkGatewayConnectionsClient) (vngc VirtualNetworkGatewayConnection, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vngc.Response.Response, err = future.GetResult(sender); err == nil && vngc.Response.Response.StatusCode != http.StatusNoContent { + vngc, err = client.CreateOrUpdateResponder(vngc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsCreateOrUpdateFuture", "Result", vngc.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewayConnectionsDeleteFuture an abstraction for monitoring and retrieving the results of // a long-running operation. type VirtualNetworkGatewayConnectionsDeleteFuture struct { @@ -23806,6 +27173,33 @@ type VirtualNetworkGatewayConnectionsDeleteFuture struct { Result func(VirtualNetworkGatewayConnectionsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *VirtualNetworkGatewayConnectionsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewayConnectionsDeleteFuture.Result. +func (future *VirtualNetworkGatewayConnectionsDeleteFuture) result(client VirtualNetworkGatewayConnectionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualNetworkGatewayConnectionsResetSharedKeyFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type VirtualNetworkGatewayConnectionsResetSharedKeyFuture struct { @@ -23815,6 +27209,39 @@ type VirtualNetworkGatewayConnectionsResetSharedKeyFuture struct { Result func(VirtualNetworkGatewayConnectionsClient) (ConnectionResetSharedKey, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewayConnectionsResetSharedKeyFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewayConnectionsResetSharedKeyFuture.Result. +func (future *VirtualNetworkGatewayConnectionsResetSharedKeyFuture) result(client VirtualNetworkGatewayConnectionsClient) (crsk ConnectionResetSharedKey, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsResetSharedKeyFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsResetSharedKeyFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if crsk.Response.Response, err = future.GetResult(sender); err == nil && crsk.Response.Response.StatusCode != http.StatusNoContent { + crsk, err = client.ResetSharedKeyResponder(crsk.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsResetSharedKeyFuture", "Result", crsk.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewayConnectionsSetSharedKeyFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type VirtualNetworkGatewayConnectionsSetSharedKeyFuture struct { @@ -23824,6 +27251,39 @@ type VirtualNetworkGatewayConnectionsSetSharedKeyFuture struct { Result func(VirtualNetworkGatewayConnectionsClient) (ConnectionSharedKey, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *VirtualNetworkGatewayConnectionsSetSharedKeyFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewayConnectionsSetSharedKeyFuture.Result. +func (future *VirtualNetworkGatewayConnectionsSetSharedKeyFuture) result(client VirtualNetworkGatewayConnectionsClient) (csk ConnectionSharedKey, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsSetSharedKeyFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsSetSharedKeyFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if csk.Response.Response, err = future.GetResult(sender); err == nil && csk.Response.Response.StatusCode != http.StatusNoContent { + csk, err = client.SetSharedKeyResponder(csk.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsSetSharedKeyFuture", "Result", csk.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewayConnectionsUpdateTagsFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type VirtualNetworkGatewayConnectionsUpdateTagsFuture struct { @@ -23833,6 +27293,39 @@ type VirtualNetworkGatewayConnectionsUpdateTagsFuture struct { Result func(VirtualNetworkGatewayConnectionsClient) (VirtualNetworkGatewayConnection, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewayConnectionsUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewayConnectionsUpdateTagsFuture.Result. 
+func (future *VirtualNetworkGatewayConnectionsUpdateTagsFuture) result(client VirtualNetworkGatewayConnectionsClient) (vngc VirtualNetworkGatewayConnection, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vngc.Response.Response, err = future.GetResult(sender); err == nil && vngc.Response.Response.StatusCode != http.StatusNoContent { + vngc, err = client.UpdateTagsResponder(vngc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsUpdateTagsFuture", "Result", vngc.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewayIPConfiguration IP configuration for virtual network gateway type VirtualNetworkGatewayIPConfiguration struct { // VirtualNetworkGatewayIPConfigurationPropertiesFormat - Properties of the virtual network gateway ip configuration. @@ -24352,6 +27845,39 @@ type VirtualNetworkGatewaysCreateOrUpdateFuture struct { Result func(VirtualNetworkGatewaysClient) (VirtualNetworkGateway, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewaysCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysCreateOrUpdateFuture.Result. +func (future *VirtualNetworkGatewaysCreateOrUpdateFuture) result(client VirtualNetworkGatewaysClient) (vng VirtualNetworkGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vng.Response.Response, err = future.GetResult(sender); err == nil && vng.Response.Response.StatusCode != http.StatusNoContent { + vng, err = client.CreateOrUpdateResponder(vng.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysCreateOrUpdateFuture", "Result", vng.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewaysDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualNetworkGatewaysDeleteFuture struct { @@ -24361,6 +27887,33 @@ type VirtualNetworkGatewaysDeleteFuture struct { Result func(VirtualNetworkGatewaysClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *VirtualNetworkGatewaysDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysDeleteFuture.Result. +func (future *VirtualNetworkGatewaysDeleteFuture) result(client VirtualNetworkGatewaysClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualNetworkGatewaysGeneratevpnclientpackageFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type VirtualNetworkGatewaysGeneratevpnclientpackageFuture struct { @@ -24370,6 +27923,39 @@ type VirtualNetworkGatewaysGeneratevpnclientpackageFuture struct { Result func(VirtualNetworkGatewaysClient) (String, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewaysGeneratevpnclientpackageFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysGeneratevpnclientpackageFuture.Result. +func (future *VirtualNetworkGatewaysGeneratevpnclientpackageFuture) result(client VirtualNetworkGatewaysClient) (s String, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGeneratevpnclientpackageFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGeneratevpnclientpackageFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if s.Response.Response, err = future.GetResult(sender); err == nil && s.Response.Response.StatusCode != http.StatusNoContent { + s, err = client.GeneratevpnclientpackageResponder(s.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGeneratevpnclientpackageFuture", "Result", s.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewaysGenerateVpnProfileFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type VirtualNetworkGatewaysGenerateVpnProfileFuture struct { @@ -24379,6 +27965,39 @@ type VirtualNetworkGatewaysGenerateVpnProfileFuture struct { Result func(VirtualNetworkGatewaysClient) (String, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *VirtualNetworkGatewaysGenerateVpnProfileFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysGenerateVpnProfileFuture.Result. +func (future *VirtualNetworkGatewaysGenerateVpnProfileFuture) result(client VirtualNetworkGatewaysClient) (s String, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGenerateVpnProfileFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGenerateVpnProfileFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if s.Response.Response, err = future.GetResult(sender); err == nil && s.Response.Response.StatusCode != http.StatusNoContent { + s, err = client.GenerateVpnProfileResponder(s.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGenerateVpnProfileFuture", "Result", s.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewaysGetAdvertisedRoutesFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type VirtualNetworkGatewaysGetAdvertisedRoutesFuture struct { @@ -24388,6 +28007,39 @@ type VirtualNetworkGatewaysGetAdvertisedRoutesFuture struct { Result func(VirtualNetworkGatewaysClient) (GatewayRouteListResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewaysGetAdvertisedRoutesFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysGetAdvertisedRoutesFuture.Result. +func (future *VirtualNetworkGatewaysGetAdvertisedRoutesFuture) result(client VirtualNetworkGatewaysClient) (grlr GatewayRouteListResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetAdvertisedRoutesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetAdvertisedRoutesFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if grlr.Response.Response, err = future.GetResult(sender); err == nil && grlr.Response.Response.StatusCode != http.StatusNoContent { + grlr, err = client.GetAdvertisedRoutesResponder(grlr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetAdvertisedRoutesFuture", "Result", grlr.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewaysGetBgpPeerStatusFuture an abstraction for monitoring and retrieving the results of // a long-running operation. 
type VirtualNetworkGatewaysGetBgpPeerStatusFuture struct { @@ -24397,6 +28049,39 @@ type VirtualNetworkGatewaysGetBgpPeerStatusFuture struct { Result func(VirtualNetworkGatewaysClient) (BgpPeerStatusListResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewaysGetBgpPeerStatusFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysGetBgpPeerStatusFuture.Result. +func (future *VirtualNetworkGatewaysGetBgpPeerStatusFuture) result(client VirtualNetworkGatewaysClient) (bpslr BgpPeerStatusListResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetBgpPeerStatusFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetBgpPeerStatusFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if bpslr.Response.Response, err = future.GetResult(sender); err == nil && bpslr.Response.Response.StatusCode != http.StatusNoContent { + bpslr, err = client.GetBgpPeerStatusResponder(bpslr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetBgpPeerStatusFuture", "Result", bpslr.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewaysGetLearnedRoutesFuture an abstraction for monitoring and retrieving the results of // a long-running operation. type VirtualNetworkGatewaysGetLearnedRoutesFuture struct { @@ -24406,6 +28091,39 @@ type VirtualNetworkGatewaysGetLearnedRoutesFuture struct { Result func(VirtualNetworkGatewaysClient) (GatewayRouteListResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewaysGetLearnedRoutesFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysGetLearnedRoutesFuture.Result. 
+func (future *VirtualNetworkGatewaysGetLearnedRoutesFuture) result(client VirtualNetworkGatewaysClient) (grlr GatewayRouteListResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetLearnedRoutesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetLearnedRoutesFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if grlr.Response.Response, err = future.GetResult(sender); err == nil && grlr.Response.Response.StatusCode != http.StatusNoContent { + grlr, err = client.GetLearnedRoutesResponder(grlr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetLearnedRoutesFuture", "Result", grlr.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture struct { @@ -24415,6 +28133,39 @@ type VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture struct { Result func(VirtualNetworkGatewaysClient) (VpnClientIPsecParameters, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture.Result. +func (future *VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture) result(client VirtualNetworkGatewaysClient) (vcipp VpnClientIPsecParameters, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vcipp.Response.Response, err = future.GetResult(sender); err == nil && vcipp.Response.Response.StatusCode != http.StatusNoContent { + vcipp, err = client.GetVpnclientIpsecParametersResponder(vcipp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture", "Result", vcipp.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewaysGetVpnProfilePackageURLFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type VirtualNetworkGatewaysGetVpnProfilePackageURLFuture struct { @@ -24424,6 +28175,39 @@ type VirtualNetworkGatewaysGetVpnProfilePackageURLFuture struct { Result func(VirtualNetworkGatewaysClient) (String, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *VirtualNetworkGatewaysGetVpnProfilePackageURLFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysGetVpnProfilePackageURLFuture.Result. +func (future *VirtualNetworkGatewaysGetVpnProfilePackageURLFuture) result(client VirtualNetworkGatewaysClient) (s String, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetVpnProfilePackageURLFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetVpnProfilePackageURLFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if s.Response.Response, err = future.GetResult(sender); err == nil && s.Response.Response.StatusCode != http.StatusNoContent { + s, err = client.GetVpnProfilePackageURLResponder(s.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetVpnProfilePackageURLFuture", "Result", s.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewaySku virtualNetworkGatewaySku details type VirtualNetworkGatewaySku struct { // Name - Gateway SKU name. Possible values include: 'VirtualNetworkGatewaySkuNameBasic', 'VirtualNetworkGatewaySkuNameHighPerformance', 'VirtualNetworkGatewaySkuNameStandard', 'VirtualNetworkGatewaySkuNameUltraPerformance', 'VirtualNetworkGatewaySkuNameVpnGw1', 'VirtualNetworkGatewaySkuNameVpnGw2', 'VirtualNetworkGatewaySkuNameVpnGw3', 'VirtualNetworkGatewaySkuNameVpnGw1AZ', 'VirtualNetworkGatewaySkuNameVpnGw2AZ', 'VirtualNetworkGatewaySkuNameVpnGw3AZ', 'VirtualNetworkGatewaySkuNameErGw1AZ', 'VirtualNetworkGatewaySkuNameErGw2AZ', 'VirtualNetworkGatewaySkuNameErGw3AZ' @@ -24443,15 +28227,75 @@ type VirtualNetworkGatewaysResetFuture struct { Result func(VirtualNetworkGatewaysClient) (VirtualNetworkGateway, error) } -// VirtualNetworkGatewaysResetVpnClientSharedKeyFuture an abstraction for monitoring and retrieving the -// results of a long-running operation. -type VirtualNetworkGatewaysResetVpnClientSharedKeyFuture struct { - azure.FutureAPI +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewaysResetFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysResetFuture.Result. 
+func (future *VirtualNetworkGatewaysResetFuture) result(client VirtualNetworkGatewaysClient) (vng VirtualNetworkGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysResetFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysResetFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vng.Response.Response, err = future.GetResult(sender); err == nil && vng.Response.Response.StatusCode != http.StatusNoContent { + vng, err = client.ResetResponder(vng.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysResetFuture", "Result", vng.Response.Response, "Failure responding to request") + } + } + return +} + +// VirtualNetworkGatewaysResetVpnClientSharedKeyFuture an abstraction for monitoring and retrieving the +// results of a long-running operation. +type VirtualNetworkGatewaysResetVpnClientSharedKeyFuture struct { + azure.FutureAPI // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. Result func(VirtualNetworkGatewaysClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewaysResetVpnClientSharedKeyFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysResetVpnClientSharedKeyFuture.Result. +func (future *VirtualNetworkGatewaysResetVpnClientSharedKeyFuture) result(client VirtualNetworkGatewaysClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysResetVpnClientSharedKeyFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysResetVpnClientSharedKeyFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture struct { @@ -24461,6 +28305,39 @@ type VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture struct { Result func(VirtualNetworkGatewaysClient) (VpnClientIPsecParameters, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture.Result. 
+func (future *VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture) result(client VirtualNetworkGatewaysClient) (vcipp VpnClientIPsecParameters, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vcipp.Response.Response, err = future.GetResult(sender); err == nil && vcipp.Response.Response.StatusCode != http.StatusNoContent { + vcipp, err = client.SetVpnclientIpsecParametersResponder(vcipp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture", "Result", vcipp.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewaysUpdateTagsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualNetworkGatewaysUpdateTagsFuture struct { @@ -24470,6 +28347,39 @@ type VirtualNetworkGatewaysUpdateTagsFuture struct { Result func(VirtualNetworkGatewaysClient) (VirtualNetworkGateway, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewaysUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysUpdateTagsFuture.Result. +func (future *VirtualNetworkGatewaysUpdateTagsFuture) result(client VirtualNetworkGatewaysClient) (vng VirtualNetworkGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vng.Response.Response, err = future.GetResult(sender); err == nil && vng.Response.Response.StatusCode != http.StatusNoContent { + vng, err = client.UpdateTagsResponder(vng.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysUpdateTagsFuture", "Result", vng.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkListResult response for the ListVirtualNetworks API service call. type VirtualNetworkListResult struct { autorest.Response `json:"-"` @@ -25070,6 +28980,39 @@ type VirtualNetworkPeeringsCreateOrUpdateFuture struct { Result func(VirtualNetworkPeeringsClient) (VirtualNetworkPeering, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *VirtualNetworkPeeringsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkPeeringsCreateOrUpdateFuture.Result. +func (future *VirtualNetworkPeeringsCreateOrUpdateFuture) result(client VirtualNetworkPeeringsClient) (vnp VirtualNetworkPeering, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkPeeringsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vnp.Response.Response, err = future.GetResult(sender); err == nil && vnp.Response.Response.StatusCode != http.StatusNoContent { + vnp, err = client.CreateOrUpdateResponder(vnp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsCreateOrUpdateFuture", "Result", vnp.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkPeeringsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualNetworkPeeringsDeleteFuture struct { @@ -25079,6 +29022,33 @@ type VirtualNetworkPeeringsDeleteFuture struct { Result func(VirtualNetworkPeeringsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkPeeringsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkPeeringsDeleteFuture.Result. +func (future *VirtualNetworkPeeringsDeleteFuture) result(client VirtualNetworkPeeringsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkPeeringsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualNetworkPropertiesFormat properties of the virtual network. type VirtualNetworkPropertiesFormat struct { // AddressSpace - The AddressSpace that contains an array of IP address ranges that can be used by subnets. @@ -25110,6 +29080,39 @@ type VirtualNetworksCreateOrUpdateFuture struct { Result func(VirtualNetworksClient) (VirtualNetwork, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworksCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworksCreateOrUpdateFuture.Result. 
+func (future *VirtualNetworksCreateOrUpdateFuture) result(client VirtualNetworksClient) (vn VirtualNetwork, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworksCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vn.Response.Response, err = future.GetResult(sender); err == nil && vn.Response.Response.StatusCode != http.StatusNoContent { + vn, err = client.CreateOrUpdateResponder(vn.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksCreateOrUpdateFuture", "Result", vn.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworksDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VirtualNetworksDeleteFuture struct { @@ -25119,6 +29122,33 @@ type VirtualNetworksDeleteFuture struct { Result func(VirtualNetworksClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworksDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworksDeleteFuture.Result. +func (future *VirtualNetworksDeleteFuture) result(client VirtualNetworksClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworksDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualNetworksUpdateTagsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualNetworksUpdateTagsFuture struct { @@ -25128,6 +29158,39 @@ type VirtualNetworksUpdateTagsFuture struct { Result func(VirtualNetworksClient) (VirtualNetwork, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworksUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworksUpdateTagsFuture.Result. 
+func (future *VirtualNetworksUpdateTagsFuture) result(client VirtualNetworksClient) (vn VirtualNetwork, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworksUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vn.Response.Response, err = future.GetResult(sender); err == nil && vn.Response.Response.StatusCode != http.StatusNoContent { + vn, err = client.UpdateTagsResponder(vn.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksUpdateTagsFuture", "Result", vn.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkTap virtual Network Tap resource type VirtualNetworkTap struct { autorest.Response `json:"-"` @@ -25445,6 +29508,39 @@ type VirtualNetworkTapsCreateOrUpdateFuture struct { Result func(VirtualNetworkTapsClient) (VirtualNetworkTap, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkTapsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkTapsCreateOrUpdateFuture.Result. +func (future *VirtualNetworkTapsCreateOrUpdateFuture) result(client VirtualNetworkTapsClient) (vnt VirtualNetworkTap, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkTapsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vnt.Response.Response, err = future.GetResult(sender); err == nil && vnt.Response.Response.StatusCode != http.StatusNoContent { + vnt, err = client.CreateOrUpdateResponder(vnt.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsCreateOrUpdateFuture", "Result", vnt.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkTapsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualNetworkTapsDeleteFuture struct { @@ -25454,6 +29550,33 @@ type VirtualNetworkTapsDeleteFuture struct { Result func(VirtualNetworkTapsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkTapsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkTapsDeleteFuture.Result. 
+func (future *VirtualNetworkTapsDeleteFuture) result(client VirtualNetworkTapsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkTapsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualNetworkTapsUpdateTagsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualNetworkTapsUpdateTagsFuture struct { @@ -25463,6 +29586,39 @@ type VirtualNetworkTapsUpdateTagsFuture struct { Result func(VirtualNetworkTapsClient) (VirtualNetworkTap, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkTapsUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkTapsUpdateTagsFuture.Result. +func (future *VirtualNetworkTapsUpdateTagsFuture) result(client VirtualNetworkTapsClient) (vnt VirtualNetworkTap, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkTapsUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vnt.Response.Response, err = future.GetResult(sender); err == nil && vnt.Response.Response.StatusCode != http.StatusNoContent { + vnt, err = client.UpdateTagsResponder(vnt.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsUpdateTagsFuture", "Result", vnt.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkUsage usage details for subnet. type VirtualNetworkUsage struct { // CurrentValue - READ-ONLY; Indicates number of IPs used from the Subnet. @@ -25657,6 +29813,39 @@ type VirtualWansCreateOrUpdateFuture struct { Result func(VirtualWansClient) (VirtualWAN, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualWansCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualWansCreateOrUpdateFuture.Result. 
+func (future *VirtualWansCreateOrUpdateFuture) result(client VirtualWansClient) (vw VirtualWAN, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualWansCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualWansCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vw.Response.Response, err = future.GetResult(sender); err == nil && vw.Response.Response.StatusCode != http.StatusNoContent { + vw, err = client.CreateOrUpdateResponder(vw.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualWansCreateOrUpdateFuture", "Result", vw.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualWansDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VirtualWansDeleteFuture struct { @@ -25666,6 +29855,33 @@ type VirtualWansDeleteFuture struct { Result func(VirtualWansClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualWansDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualWansDeleteFuture.Result. +func (future *VirtualWansDeleteFuture) result(client VirtualWansClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualWansDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualWansDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualWanSecurityProvider collection of SecurityProviders. type VirtualWanSecurityProvider struct { // Name - Name of the security provider. @@ -25691,6 +29907,39 @@ type VirtualWansUpdateTagsFuture struct { Result func(VirtualWansClient) (VirtualWAN, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualWansUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualWansUpdateTagsFuture.Result. 
+func (future *VirtualWansUpdateTagsFuture) result(client VirtualWansClient) (vw VirtualWAN, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualWansUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualWansUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vw.Response.Response, err = future.GetResult(sender); err == nil && vw.Response.Response.StatusCode != http.StatusNoContent { + vw, err = client.UpdateTagsResponder(vw.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualWansUpdateTagsFuture", "Result", vw.Response.Response, "Failure responding to request") + } + } + return +} + // VpnClientConfiguration vpnClientConfiguration for P2S client. type VpnClientConfiguration struct { // VpnClientAddressPool - The reference of the address space resource which represents Address space for P2S VpnClient. @@ -26119,6 +30368,39 @@ type VpnConnectionsCreateOrUpdateFuture struct { Result func(VpnConnectionsClient) (VpnConnection, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VpnConnectionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VpnConnectionsCreateOrUpdateFuture.Result. +func (future *VpnConnectionsCreateOrUpdateFuture) result(client VpnConnectionsClient) (vc VpnConnection, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnConnectionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VpnConnectionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vc.Response.Response, err = future.GetResult(sender); err == nil && vc.Response.Response.StatusCode != http.StatusNoContent { + vc, err = client.CreateOrUpdateResponder(vc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnConnectionsCreateOrUpdateFuture", "Result", vc.Response.Response, "Failure responding to request") + } + } + return +} + // VpnConnectionsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VpnConnectionsDeleteFuture struct { @@ -26128,6 +30410,33 @@ type VpnConnectionsDeleteFuture struct { Result func(VpnConnectionsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VpnConnectionsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VpnConnectionsDeleteFuture.Result. 
+func (future *VpnConnectionsDeleteFuture) result(client VpnConnectionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnConnectionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VpnConnectionsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VpnDeviceScriptParameters vpn device configuration script generation parameters type VpnDeviceScriptParameters struct { // Vendor - The vendor for the vpn device. @@ -26275,6 +30584,39 @@ type VpnGatewaysCreateOrUpdateFuture struct { Result func(VpnGatewaysClient) (VpnGateway, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VpnGatewaysCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VpnGatewaysCreateOrUpdateFuture.Result. +func (future *VpnGatewaysCreateOrUpdateFuture) result(client VpnGatewaysClient) (vg VpnGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VpnGatewaysCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vg.Response.Response, err = future.GetResult(sender); err == nil && vg.Response.Response.StatusCode != http.StatusNoContent { + vg, err = client.CreateOrUpdateResponder(vg.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnGatewaysCreateOrUpdateFuture", "Result", vg.Response.Response, "Failure responding to request") + } + } + return +} + // VpnGatewaysDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VpnGatewaysDeleteFuture struct { @@ -26284,6 +30626,33 @@ type VpnGatewaysDeleteFuture struct { Result func(VpnGatewaysClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VpnGatewaysDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VpnGatewaysDeleteFuture.Result. +func (future *VpnGatewaysDeleteFuture) result(client VpnGatewaysClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VpnGatewaysDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VpnGatewaysUpdateTagsFuture an abstraction for monitoring and retrieving the results of a long-running // operation. 
type VpnGatewaysUpdateTagsFuture struct { @@ -26293,6 +30662,39 @@ type VpnGatewaysUpdateTagsFuture struct { Result func(VpnGatewaysClient) (VpnGateway, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VpnGatewaysUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VpnGatewaysUpdateTagsFuture.Result. +func (future *VpnGatewaysUpdateTagsFuture) result(client VpnGatewaysClient) (vg VpnGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnGatewaysUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VpnGatewaysUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vg.Response.Response, err = future.GetResult(sender); err == nil && vg.Response.Response.StatusCode != http.StatusNoContent { + vg, err = client.UpdateTagsResponder(vg.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnGatewaysUpdateTagsFuture", "Result", vg.Response.Response, "Failure responding to request") + } + } + return +} + // VpnProfileResponse vpn Profile Response for package generation type VpnProfileResponse struct { autorest.Response `json:"-"` @@ -26449,6 +30851,33 @@ type VpnSitesConfigurationDownloadFuture struct { Result func(VpnSitesConfigurationClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VpnSitesConfigurationDownloadFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VpnSitesConfigurationDownloadFuture.Result. +func (future *VpnSitesConfigurationDownloadFuture) result(client VpnSitesConfigurationClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnSitesConfigurationDownloadFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VpnSitesConfigurationDownloadFuture") + return + } + ar.Response = future.Response() + return +} + // VpnSitesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VpnSitesCreateOrUpdateFuture struct { @@ -26458,6 +30887,39 @@ type VpnSitesCreateOrUpdateFuture struct { Result func(VpnSitesClient) (VpnSite, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VpnSitesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VpnSitesCreateOrUpdateFuture.Result. 
+func (future *VpnSitesCreateOrUpdateFuture) result(client VpnSitesClient) (vs VpnSite, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnSitesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VpnSitesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vs.Response.Response, err = future.GetResult(sender); err == nil && vs.Response.Response.StatusCode != http.StatusNoContent { + vs, err = client.CreateOrUpdateResponder(vs.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnSitesCreateOrUpdateFuture", "Result", vs.Response.Response, "Failure responding to request") + } + } + return +} + // VpnSitesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VpnSitesDeleteFuture struct { @@ -26467,6 +30929,33 @@ type VpnSitesDeleteFuture struct { Result func(VpnSitesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VpnSitesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VpnSitesDeleteFuture.Result. +func (future *VpnSitesDeleteFuture) result(client VpnSitesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnSitesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VpnSitesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VpnSitesUpdateTagsFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VpnSitesUpdateTagsFuture struct { @@ -26476,6 +30965,39 @@ type VpnSitesUpdateTagsFuture struct { Result func(VpnSitesClient) (VpnSite, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VpnSitesUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VpnSitesUpdateTagsFuture.Result. 
+func (future *VpnSitesUpdateTagsFuture) result(client VpnSitesClient) (vs VpnSite, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnSitesUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VpnSitesUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vs.Response.Response, err = future.GetResult(sender); err == nil && vs.Response.Response.StatusCode != http.StatusNoContent { + vs, err = client.UpdateTagsResponder(vs.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnSitesUpdateTagsFuture", "Result", vs.Response.Response, "Failure responding to request") + } + } + return +} + // Watcher network watcher in a resource group. type Watcher struct { autorest.Response `json:"-"` @@ -26614,6 +31136,39 @@ type WatchersCheckConnectivityFuture struct { Result func(WatchersClient) (ConnectivityInformation, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *WatchersCheckConnectivityFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WatchersCheckConnectivityFuture.Result. +func (future *WatchersCheckConnectivityFuture) result(client WatchersClient) (ci ConnectivityInformation, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersCheckConnectivityFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.WatchersCheckConnectivityFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ci.Response.Response, err = future.GetResult(sender); err == nil && ci.Response.Response.StatusCode != http.StatusNoContent { + ci, err = client.CheckConnectivityResponder(ci.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersCheckConnectivityFuture", "Result", ci.Response.Response, "Failure responding to request") + } + } + return +} + // WatchersDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type WatchersDeleteFuture struct { @@ -26623,6 +31178,33 @@ type WatchersDeleteFuture struct { Result func(WatchersClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *WatchersDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WatchersDeleteFuture.Result. 
+func (future *WatchersDeleteFuture) result(client WatchersClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.WatchersDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // WatchersGetAzureReachabilityReportFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type WatchersGetAzureReachabilityReportFuture struct { @@ -26632,6 +31214,39 @@ type WatchersGetAzureReachabilityReportFuture struct { Result func(WatchersClient) (AzureReachabilityReport, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *WatchersGetAzureReachabilityReportFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WatchersGetAzureReachabilityReportFuture.Result. +func (future *WatchersGetAzureReachabilityReportFuture) result(client WatchersClient) (arr AzureReachabilityReport, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetAzureReachabilityReportFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.WatchersGetAzureReachabilityReportFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if arr.Response.Response, err = future.GetResult(sender); err == nil && arr.Response.Response.StatusCode != http.StatusNoContent { + arr, err = client.GetAzureReachabilityReportResponder(arr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetAzureReachabilityReportFuture", "Result", arr.Response.Response, "Failure responding to request") + } + } + return +} + // WatchersGetFlowLogStatusFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type WatchersGetFlowLogStatusFuture struct { @@ -26641,6 +31256,39 @@ type WatchersGetFlowLogStatusFuture struct { Result func(WatchersClient) (FlowLogInformation, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *WatchersGetFlowLogStatusFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WatchersGetFlowLogStatusFuture.Result. 
+func (future *WatchersGetFlowLogStatusFuture) result(client WatchersClient) (fli FlowLogInformation, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetFlowLogStatusFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.WatchersGetFlowLogStatusFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if fli.Response.Response, err = future.GetResult(sender); err == nil && fli.Response.Response.StatusCode != http.StatusNoContent { + fli, err = client.GetFlowLogStatusResponder(fli.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetFlowLogStatusFuture", "Result", fli.Response.Response, "Failure responding to request") + } + } + return +} + // WatchersGetNetworkConfigurationDiagnosticFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type WatchersGetNetworkConfigurationDiagnosticFuture struct { @@ -26650,6 +31298,39 @@ type WatchersGetNetworkConfigurationDiagnosticFuture struct { Result func(WatchersClient) (ConfigurationDiagnosticResponse, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *WatchersGetNetworkConfigurationDiagnosticFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WatchersGetNetworkConfigurationDiagnosticFuture.Result. +func (future *WatchersGetNetworkConfigurationDiagnosticFuture) result(client WatchersClient) (cdr ConfigurationDiagnosticResponse, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetNetworkConfigurationDiagnosticFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.WatchersGetNetworkConfigurationDiagnosticFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if cdr.Response.Response, err = future.GetResult(sender); err == nil && cdr.Response.Response.StatusCode != http.StatusNoContent { + cdr, err = client.GetNetworkConfigurationDiagnosticResponder(cdr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetNetworkConfigurationDiagnosticFuture", "Result", cdr.Response.Response, "Failure responding to request") + } + } + return +} + // WatchersGetNextHopFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type WatchersGetNextHopFuture struct { @@ -26659,6 +31340,39 @@ type WatchersGetNextHopFuture struct { Result func(WatchersClient) (NextHopResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *WatchersGetNextHopFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WatchersGetNextHopFuture.Result. +func (future *WatchersGetNextHopFuture) result(client WatchersClient) (nhr NextHopResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetNextHopFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.WatchersGetNextHopFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if nhr.Response.Response, err = future.GetResult(sender); err == nil && nhr.Response.Response.StatusCode != http.StatusNoContent { + nhr, err = client.GetNextHopResponder(nhr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetNextHopFuture", "Result", nhr.Response.Response, "Failure responding to request") + } + } + return +} + // WatchersGetTroubleshootingFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type WatchersGetTroubleshootingFuture struct { @@ -26668,6 +31382,39 @@ type WatchersGetTroubleshootingFuture struct { Result func(WatchersClient) (TroubleshootingResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *WatchersGetTroubleshootingFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WatchersGetTroubleshootingFuture.Result. +func (future *WatchersGetTroubleshootingFuture) result(client WatchersClient) (tr TroubleshootingResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetTroubleshootingFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.WatchersGetTroubleshootingFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if tr.Response.Response, err = future.GetResult(sender); err == nil && tr.Response.Response.StatusCode != http.StatusNoContent { + tr, err = client.GetTroubleshootingResponder(tr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetTroubleshootingFuture", "Result", tr.Response.Response, "Failure responding to request") + } + } + return +} + // WatchersGetTroubleshootingResultFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type WatchersGetTroubleshootingResultFuture struct { @@ -26677,6 +31424,39 @@ type WatchersGetTroubleshootingResultFuture struct { Result func(WatchersClient) (TroubleshootingResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *WatchersGetTroubleshootingResultFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WatchersGetTroubleshootingResultFuture.Result. +func (future *WatchersGetTroubleshootingResultFuture) result(client WatchersClient) (tr TroubleshootingResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetTroubleshootingResultFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.WatchersGetTroubleshootingResultFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if tr.Response.Response, err = future.GetResult(sender); err == nil && tr.Response.Response.StatusCode != http.StatusNoContent { + tr, err = client.GetTroubleshootingResultResponder(tr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetTroubleshootingResultFuture", "Result", tr.Response.Response, "Failure responding to request") + } + } + return +} + // WatchersGetVMSecurityRulesFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type WatchersGetVMSecurityRulesFuture struct { @@ -26686,6 +31466,39 @@ type WatchersGetVMSecurityRulesFuture struct { Result func(WatchersClient) (SecurityGroupViewResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *WatchersGetVMSecurityRulesFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WatchersGetVMSecurityRulesFuture.Result. +func (future *WatchersGetVMSecurityRulesFuture) result(client WatchersClient) (sgvr SecurityGroupViewResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetVMSecurityRulesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.WatchersGetVMSecurityRulesFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if sgvr.Response.Response, err = future.GetResult(sender); err == nil && sgvr.Response.Response.StatusCode != http.StatusNoContent { + sgvr, err = client.GetVMSecurityRulesResponder(sgvr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetVMSecurityRulesFuture", "Result", sgvr.Response.Response, "Failure responding to request") + } + } + return +} + // WatchersListAvailableProvidersFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type WatchersListAvailableProvidersFuture struct { @@ -26695,6 +31508,39 @@ type WatchersListAvailableProvidersFuture struct { Result func(WatchersClient) (AvailableProvidersList, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *WatchersListAvailableProvidersFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WatchersListAvailableProvidersFuture.Result. +func (future *WatchersListAvailableProvidersFuture) result(client WatchersClient) (apl AvailableProvidersList, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersListAvailableProvidersFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.WatchersListAvailableProvidersFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if apl.Response.Response, err = future.GetResult(sender); err == nil && apl.Response.Response.StatusCode != http.StatusNoContent { + apl, err = client.ListAvailableProvidersResponder(apl.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersListAvailableProvidersFuture", "Result", apl.Response.Response, "Failure responding to request") + } + } + return +} + // WatchersSetFlowLogConfigurationFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type WatchersSetFlowLogConfigurationFuture struct { @@ -26704,6 +31550,39 @@ type WatchersSetFlowLogConfigurationFuture struct { Result func(WatchersClient) (FlowLogInformation, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *WatchersSetFlowLogConfigurationFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WatchersSetFlowLogConfigurationFuture.Result. +func (future *WatchersSetFlowLogConfigurationFuture) result(client WatchersClient) (fli FlowLogInformation, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersSetFlowLogConfigurationFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.WatchersSetFlowLogConfigurationFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if fli.Response.Response, err = future.GetResult(sender); err == nil && fli.Response.Response.StatusCode != http.StatusNoContent { + fli, err = client.SetFlowLogConfigurationResponder(fli.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersSetFlowLogConfigurationFuture", "Result", fli.Response.Response, "Failure responding to request") + } + } + return +} + // WatchersVerifyIPFlowFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type WatchersVerifyIPFlowFuture struct { @@ -26713,6 +31592,39 @@ type WatchersVerifyIPFlowFuture struct { Result func(WatchersClient) (VerificationIPFlowResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *WatchersVerifyIPFlowFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WatchersVerifyIPFlowFuture.Result. +func (future *WatchersVerifyIPFlowFuture) result(client WatchersClient) (vifr VerificationIPFlowResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersVerifyIPFlowFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.WatchersVerifyIPFlowFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vifr.Response.Response, err = future.GetResult(sender); err == nil && vifr.Response.Response.StatusCode != http.StatusNoContent { + vifr, err = client.VerifyIPFlowResponder(vifr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersVerifyIPFlowFuture", "Result", vifr.Response.Response, "Failure responding to request") + } + } + return +} + // WebApplicationFirewallCustomRule defines contents of a web application rule type WebApplicationFirewallCustomRule struct { // Name - Gets name of the resource that is unique within a policy. This name can be used to access the resource. @@ -26759,6 +31671,33 @@ type WebApplicationFirewallPoliciesDeleteFuture struct { Result func(WebApplicationFirewallPoliciesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *WebApplicationFirewallPoliciesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WebApplicationFirewallPoliciesDeleteFuture.Result. +func (future *WebApplicationFirewallPoliciesDeleteFuture) result(client WebApplicationFirewallPoliciesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WebApplicationFirewallPoliciesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.WebApplicationFirewallPoliciesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // WebApplicationFirewallPolicy defines web application firewall policy. type WebApplicationFirewallPolicy struct { autorest.Response `json:"-"` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/operations.go index 54df3d500d8..10d6238d6d0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/operations.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/p2svpngateways.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/p2svpngateways.go index fedcedb0669..b29ab0319f6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/p2svpngateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/p2svpngateways.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -107,30 +96,7 @@ func (client P2sVpnGatewaysClient) CreateOrUpdateSender(req *http.Request) (futu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client P2sVpnGatewaysClient) (pvg P2SVpnGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.P2sVpnGatewaysCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - pvg.Response.Response, err = future.GetResult(sender) - if pvg.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && pvg.Response.Response.StatusCode != http.StatusNoContent { - pvg, err = client.CreateOrUpdateResponder(pvg.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysCreateOrUpdateFuture", "Result", pvg.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -208,20 +174,7 @@ func (client P2sVpnGatewaysClient) DeleteSender(req *http.Request) (future P2sVp var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client P2sVpnGatewaysClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.P2sVpnGatewaysDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -301,30 +254,7 @@ func (client P2sVpnGatewaysClient) GenerateVpnProfileSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client P2sVpnGatewaysClient) (vpr VpnProfileResponse, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysGenerateVpnProfileFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.P2sVpnGatewaysGenerateVpnProfileFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vpr.Response.Response, err = future.GetResult(sender) - if vpr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysGenerateVpnProfileFuture", "Result", nil, "received nil response and error") - } - if err == nil && vpr.Response.Response.StatusCode != http.StatusNoContent { - vpr, err = client.GenerateVpnProfileResponder(vpr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysGenerateVpnProfileFuture", "Result", vpr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result 
return } @@ -710,30 +640,7 @@ func (client P2sVpnGatewaysClient) UpdateTagsSender(req *http.Request) (future P var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client P2sVpnGatewaysClient) (pvg P2SVpnGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.P2sVpnGatewaysUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - pvg.Response.Response, err = future.GetResult(sender) - if pvg.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && pvg.Response.Response.StatusCode != http.StatusNoContent { - pvg, err = client.UpdateTagsResponder(pvg.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysUpdateTagsFuture", "Result", pvg.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/p2svpnserverconfigurations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/p2svpnserverconfigurations.go index b56453a3c49..980436da4c2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/p2svpnserverconfigurations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/p2svpnserverconfigurations.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -111,30 +100,7 @@ func (client P2sVpnServerConfigurationsClient) CreateOrUpdateSender(req *http.Re var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client P2sVpnServerConfigurationsClient) (pvsc P2SVpnServerConfiguration, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnServerConfigurationsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.P2sVpnServerConfigurationsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - pvsc.Response.Response, err = future.GetResult(sender) - if pvsc.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnServerConfigurationsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && pvsc.Response.Response.StatusCode != http.StatusNoContent { - pvsc, err = client.CreateOrUpdateResponder(pvsc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnServerConfigurationsCreateOrUpdateFuture", "Result", pvsc.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -214,20 +180,7 @@ func (client P2sVpnServerConfigurationsClient) DeleteSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client P2sVpnServerConfigurationsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnServerConfigurationsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.P2sVpnServerConfigurationsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/packetcaptures.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/packetcaptures.go index 3f9a5c740cd..d042e0ab164 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/packetcaptures.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/packetcaptures.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
// // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -118,30 +107,7 @@ func (client PacketCapturesClient) CreateSender(req *http.Request) (future Packe var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client PacketCapturesClient) (pcr PacketCaptureResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PacketCapturesCreateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.PacketCapturesCreateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - pcr.Response.Response, err = future.GetResult(sender) - if pcr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.PacketCapturesCreateFuture", "Result", nil, "received nil response and error") - } - if err == nil && pcr.Response.Response.StatusCode != http.StatusNoContent { - pcr, err = client.CreateResponder(pcr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PacketCapturesCreateFuture", "Result", pcr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -221,20 +187,7 @@ func (client PacketCapturesClient) DeleteSender(req *http.Request) (future Packe var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client PacketCapturesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PacketCapturesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.PacketCapturesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -391,30 +344,7 @@ func (client PacketCapturesClient) GetStatusSender(req *http.Request) (future Pa var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client PacketCapturesClient) (pcqsr PacketCaptureQueryStatusResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PacketCapturesGetStatusFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.PacketCapturesGetStatusFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - pcqsr.Response.Response, err = future.GetResult(sender) - if pcqsr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.PacketCapturesGetStatusFuture", "Result", nil, "received nil response and error") - } - if err == nil && pcqsr.Response.Response.StatusCode != http.StatusNoContent { - pcqsr, err = client.GetStatusResponder(pcqsr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PacketCapturesGetStatusFuture", "Result", 
pcqsr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -570,20 +500,7 @@ func (client PacketCapturesClient) StopSender(req *http.Request) (future PacketC var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client PacketCapturesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PacketCapturesStopFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.PacketCapturesStopFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/peerexpressroutecircuitconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/peerexpressroutecircuitconnections.go index 263d40f5eaf..04fcfe350c5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/peerexpressroutecircuitconnections.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/peerexpressroutecircuitconnections.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/profiles.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/profiles.go index 7dee462af97..313764de59e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/profiles.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/profiles.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -182,20 +171,7 @@ func (client ProfilesClient) DeleteSender(req *http.Request) (future ProfilesDel var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ProfilesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ProfilesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ProfilesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/publicipaddresses.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/publicipaddresses.go index 62ca490f72d..eca4f4b9760 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/publicipaddresses.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/publicipaddresses.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -119,30 +108,7 @@ func (client PublicIPAddressesClient) CreateOrUpdateSender(req *http.Request) (f var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client PublicIPAddressesClient) (pia PublicIPAddress, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PublicIPAddressesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.PublicIPAddressesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - pia.Response.Response, err = future.GetResult(sender) - if pia.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.PublicIPAddressesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && pia.Response.Response.StatusCode != http.StatusNoContent { - pia, err = client.CreateOrUpdateResponder(pia.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PublicIPAddressesCreateOrUpdateFuture", "Result", pia.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -220,20 +186,7 @@ func (client PublicIPAddressesClient) DeleteSender(req *http.Request) (future Pu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client PublicIPAddressesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PublicIPAddressesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.PublicIPAddressesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -954,30 +907,7 @@ func (client PublicIPAddressesClient) UpdateTagsSender(req *http.Request) (futur var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client PublicIPAddressesClient) (pia PublicIPAddress, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PublicIPAddressesUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.PublicIPAddressesUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - pia.Response.Response, err = future.GetResult(sender) - if pia.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.PublicIPAddressesUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && pia.Response.Response.StatusCode != http.StatusNoContent { - pia, err = client.UpdateTagsResponder(pia.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PublicIPAddressesUpdateTagsFuture", "Result", pia.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result 
return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/publicipprefixes.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/publicipprefixes.go index a40fc647846..2509ced0d32 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/publicipprefixes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/publicipprefixes.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -107,30 +96,7 @@ func (client PublicIPPrefixesClient) CreateOrUpdateSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client PublicIPPrefixesClient) (pip PublicIPPrefix, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.PublicIPPrefixesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - pip.Response.Response, err = future.GetResult(sender) - if pip.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && pip.Response.Response.StatusCode != http.StatusNoContent { - pip, err = client.CreateOrUpdateResponder(pip.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesCreateOrUpdateFuture", "Result", pip.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -208,20 +174,7 @@ func (client PublicIPPrefixesClient) DeleteSender(req *http.Request) (future Pub var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client PublicIPPrefixesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.PublicIPPrefixesDeleteFuture") - return - } - ar.Response = future.Response() - return - } 
+ future.Result = future.result return } @@ -610,30 +563,7 @@ func (client PublicIPPrefixesClient) UpdateTagsSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client PublicIPPrefixesClient) (pip PublicIPPrefix, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.PublicIPPrefixesUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - pip.Response.Response, err = future.GetResult(sender) - if pip.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && pip.Response.Response.StatusCode != http.StatusNoContent { - pip, err = client.UpdateTagsResponder(pip.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesUpdateTagsFuture", "Result", pip.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/routefilterrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/routefilterrules.go index da57432fb89..d5df335992c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/routefilterrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/routefilterrules.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -120,30 +109,7 @@ func (client RouteFilterRulesClient) CreateOrUpdateSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RouteFilterRulesClient) (rfr RouteFilterRule, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteFilterRulesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.RouteFilterRulesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - rfr.Response.Response, err = future.GetResult(sender) - if rfr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.RouteFilterRulesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && rfr.Response.Response.StatusCode != http.StatusNoContent { - rfr, err = client.CreateOrUpdateResponder(rfr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteFilterRulesCreateOrUpdateFuture", "Result", rfr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -223,20 +189,7 @@ func (client RouteFilterRulesClient) DeleteSender(req *http.Request) (future Rou var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RouteFilterRulesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteFilterRulesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.RouteFilterRulesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -516,30 +469,7 @@ func (client RouteFilterRulesClient) UpdateSender(req *http.Request) (future Rou var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RouteFilterRulesClient) (rfr RouteFilterRule, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteFilterRulesUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.RouteFilterRulesUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - rfr.Response.Response, err = future.GetResult(sender) - if rfr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.RouteFilterRulesUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && rfr.Response.Response.StatusCode != http.StatusNoContent { - rfr, err = client.UpdateResponder(rfr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteFilterRulesUpdateFuture", "Result", rfr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/routefilters.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/routefilters.go index 04993d2c88e..795254d0421 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/routefilters.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/routefilters.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -107,30 +96,7 @@ func (client RouteFiltersClient) CreateOrUpdateSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RouteFiltersClient) (rf RouteFilter, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteFiltersCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.RouteFiltersCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - rf.Response.Response, err = future.GetResult(sender) - if rf.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.RouteFiltersCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && rf.Response.Response.StatusCode != http.StatusNoContent { - rf, err = client.CreateOrUpdateResponder(rf.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteFiltersCreateOrUpdateFuture", "Result", rf.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -208,20 +174,7 @@ func (client RouteFiltersClient) DeleteSender(req *http.Request) (future RouteFi var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RouteFiltersClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteFiltersDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.RouteFiltersDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -613,30 +566,7 @@ func (client 
RouteFiltersClient) UpdateSender(req *http.Request) (future RouteFi var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RouteFiltersClient) (rf RouteFilter, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteFiltersUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.RouteFiltersUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - rf.Response.Response, err = future.GetResult(sender) - if rf.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.RouteFiltersUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && rf.Response.Response.StatusCode != http.StatusNoContent { - rf, err = client.UpdateResponder(rf.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteFiltersUpdateFuture", "Result", rf.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/routes.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/routes.go index 89c97e26a2d..e1b38c64dbb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/routes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/routes.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -108,30 +97,7 @@ func (client RoutesClient) CreateOrUpdateSender(req *http.Request) (future Route var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RoutesClient) (r Route, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RoutesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.RoutesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - r.Response.Response, err = future.GetResult(sender) - if r.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.RoutesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && r.Response.Response.StatusCode != http.StatusNoContent { - r, err = client.CreateOrUpdateResponder(r.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RoutesCreateOrUpdateFuture", "Result", r.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -211,20 +177,7 @@ func (client RoutesClient) DeleteSender(req *http.Request) (future RoutesDeleteF var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RoutesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RoutesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.RoutesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/routetables.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/routetables.go index 4a89645af4e..dd137ff677b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/routetables.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/routetables.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -106,30 +95,7 @@ func (client RouteTablesClient) CreateOrUpdateSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RouteTablesClient) (rt RouteTable, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteTablesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.RouteTablesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - rt.Response.Response, err = future.GetResult(sender) - if rt.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.RouteTablesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && rt.Response.Response.StatusCode != http.StatusNoContent { - rt, err = client.CreateOrUpdateResponder(rt.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteTablesCreateOrUpdateFuture", "Result", rt.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -207,20 +173,7 @@ func (client RouteTablesClient) DeleteSender(req *http.Request) (future RouteTab var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RouteTablesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteTablesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.RouteTablesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -609,30 +562,7 @@ func (client RouteTablesClient) UpdateTagsSender(req *http.Request) (future Rout var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RouteTablesClient) (rt RouteTable, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteTablesUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.RouteTablesUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - rt.Response.Response, err = future.GetResult(sender) - if rt.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.RouteTablesUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && rt.Response.Response.StatusCode != http.StatusNoContent { - rt, err = client.UpdateTagsResponder(rt.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteTablesUpdateTagsFuture", "Result", rt.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/securitygroups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/securitygroups.go index c43b2e33cf5..4c92bf04eaa 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/securitygroups.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/securitygroups.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -106,30 +95,7 @@ func (client SecurityGroupsClient) CreateOrUpdateSender(req *http.Request) (futu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SecurityGroupsClient) (sg SecurityGroup, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SecurityGroupsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.SecurityGroupsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - sg.Response.Response, err = future.GetResult(sender) - if sg.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.SecurityGroupsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && sg.Response.Response.StatusCode != http.StatusNoContent { - sg, err = client.CreateOrUpdateResponder(sg.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SecurityGroupsCreateOrUpdateFuture", "Result", sg.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -207,20 +173,7 @@ func (client SecurityGroupsClient) DeleteSender(req *http.Request) (future Secur var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SecurityGroupsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SecurityGroupsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.SecurityGroupsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -609,30 
+562,7 @@ func (client SecurityGroupsClient) UpdateTagsSender(req *http.Request) (future S var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SecurityGroupsClient) (sg SecurityGroup, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SecurityGroupsUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.SecurityGroupsUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - sg.Response.Response, err = future.GetResult(sender) - if sg.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.SecurityGroupsUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && sg.Response.Response.StatusCode != http.StatusNoContent { - sg, err = client.UpdateTagsResponder(sg.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SecurityGroupsUpdateTagsFuture", "Result", sg.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/securityrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/securityrules.go index 28782ec4e50..0b4532e25bf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/securityrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/securityrules.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -108,30 +97,7 @@ func (client SecurityRulesClient) CreateOrUpdateSender(req *http.Request) (futur var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SecurityRulesClient) (sr SecurityRule, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SecurityRulesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.SecurityRulesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - sr.Response.Response, err = future.GetResult(sender) - if sr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.SecurityRulesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && sr.Response.Response.StatusCode != http.StatusNoContent { - sr, err = client.CreateOrUpdateResponder(sr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SecurityRulesCreateOrUpdateFuture", "Result", sr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -211,20 +177,7 @@ func (client SecurityRulesClient) DeleteSender(req *http.Request) (future Securi var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SecurityRulesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SecurityRulesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.SecurityRulesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/serviceendpointpolicies.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/serviceendpointpolicies.go index df9c2abc4a3..c57f8293677 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/serviceendpointpolicies.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/serviceendpointpolicies.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. 
// Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -107,30 +96,7 @@ func (client ServiceEndpointPoliciesClient) CreateOrUpdateSender(req *http.Reque var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ServiceEndpointPoliciesClient) (sep ServiceEndpointPolicy, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ServiceEndpointPoliciesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - sep.Response.Response, err = future.GetResult(sender) - if sep.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && sep.Response.Response.StatusCode != http.StatusNoContent { - sep, err = client.CreateOrUpdateResponder(sep.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesCreateOrUpdateFuture", "Result", sep.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -208,20 +174,7 @@ func (client ServiceEndpointPoliciesClient) DeleteSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ServiceEndpointPoliciesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ServiceEndpointPoliciesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -610,30 +563,7 @@ func (client ServiceEndpointPoliciesClient) UpdateSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ServiceEndpointPoliciesClient) (sep ServiceEndpointPolicy, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ServiceEndpointPoliciesUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - sep.Response.Response, err = future.GetResult(sender) - if sep.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && sep.Response.Response.StatusCode != http.StatusNoContent { - sep, err = client.UpdateResponder(sep.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, 
"network.ServiceEndpointPoliciesUpdateFuture", "Result", sep.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/serviceendpointpolicydefinitions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/serviceendpointpolicydefinitions.go index 6319311500b..8d4e8319c0a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/serviceendpointpolicydefinitions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/serviceendpointpolicydefinitions.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -110,30 +99,7 @@ func (client ServiceEndpointPolicyDefinitionsClient) CreateOrUpdateSender(req *h var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ServiceEndpointPolicyDefinitionsClient) (sepd ServiceEndpointPolicyDefinition, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - sepd.Response.Response, err = future.GetResult(sender) - if sepd.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && sepd.Response.Response.StatusCode != http.StatusNoContent { - sepd, err = client.CreateOrUpdateResponder(sepd.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture", "Result", sepd.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -213,20 +179,7 @@ func (client ServiceEndpointPolicyDefinitionsClient) DeleteSender(req *http.Requ var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ServiceEndpointPolicyDefinitionsClient) (ar autorest.Response, err error) { - var done bool - done, err = 
future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ServiceEndpointPolicyDefinitionsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/subnets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/subnets.go index c349da9ca0b..f92c7b3879f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/subnets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/subnets.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -108,30 +97,7 @@ func (client SubnetsClient) CreateOrUpdateSender(req *http.Request) (future Subn var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SubnetsClient) (s Subnet, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SubnetsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.SubnetsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - s.Response.Response, err = future.GetResult(sender) - if s.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.SubnetsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && s.Response.Response.StatusCode != http.StatusNoContent { - s, err = client.CreateOrUpdateResponder(s.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SubnetsCreateOrUpdateFuture", "Result", s.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -211,20 +177,7 @@ func (client SubnetsClient) DeleteSender(req *http.Request) (future SubnetsDelet var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SubnetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SubnetsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.SubnetsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -507,20 +460,7 @@ func (client SubnetsClient) PrepareNetworkPoliciesSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SubnetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SubnetsPrepareNetworkPoliciesFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.SubnetsPrepareNetworkPoliciesFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/usages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/usages.go index 68db2cbfd9c..bb7d887f0f2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/usages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/usages.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/version.go index 91a37daf05f..770a6a2c9e7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/version.go @@ -2,19 +2,8 @@ package network import "github.com/Azure/azure-sdk-for-go/version" -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualhubs.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualhubs.go index d364bc0339f..b80998d2354 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualhubs.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualhubs.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. 
// Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -107,30 +96,7 @@ func (client VirtualHubsClient) CreateOrUpdateSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualHubsClient) (vh VirtualHub, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualHubsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualHubsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vh.Response.Response, err = future.GetResult(sender) - if vh.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualHubsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vh.Response.Response.StatusCode != http.StatusNoContent { - vh, err = client.CreateOrUpdateResponder(vh.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualHubsCreateOrUpdateFuture", "Result", vh.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -208,20 +174,7 @@ func (client VirtualHubsClient) DeleteSender(req *http.Request) (future VirtualH var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualHubsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualHubsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualHubsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -606,30 +559,7 @@ func (client VirtualHubsClient) UpdateTagsSender(req *http.Request) (future Virt var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualHubsClient) (vh VirtualHub, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualHubsUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualHubsUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vh.Response.Response, err = future.GetResult(sender) - if vh.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualHubsUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && vh.Response.Response.StatusCode != http.StatusNoContent { - vh, err = client.UpdateTagsResponder(vh.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualHubsUpdateTagsFuture", "Result", vh.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualnetworkgatewayconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualnetworkgatewayconnections.go index 6ef88d6224e..c4faadef55f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualnetworkgatewayconnections.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualnetworkgatewayconnections.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -121,30 +110,7 @@ func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdateSender(req *h var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewayConnectionsClient) (vngc VirtualNetworkGatewayConnection, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vngc.Response.Response, err = future.GetResult(sender) - if vngc.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vngc.Response.Response.StatusCode != http.StatusNoContent { - vngc, err = client.CreateOrUpdateResponder(vngc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsCreateOrUpdateFuture", "Result", vngc.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -222,20 +188,7 @@ func (client VirtualNetworkGatewayConnectionsClient) DeleteSender(req *http.Requ var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewayConnectionsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsDeleteFuture", "Result", future.Response(), 
"Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -597,30 +550,7 @@ func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeySender(req *h var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewayConnectionsClient) (crsk ConnectionResetSharedKey, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsResetSharedKeyFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsResetSharedKeyFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - crsk.Response.Response, err = future.GetResult(sender) - if crsk.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsResetSharedKeyFuture", "Result", nil, "received nil response and error") - } - if err == nil && crsk.Response.Response.StatusCode != http.StatusNoContent { - crsk, err = client.ResetSharedKeyResponder(crsk.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsResetSharedKeyFuture", "Result", crsk.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -710,30 +640,7 @@ func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeySender(req *htt var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewayConnectionsClient) (csk ConnectionSharedKey, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsSetSharedKeyFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsSetSharedKeyFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - csk.Response.Response, err = future.GetResult(sender) - if csk.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsSetSharedKeyFuture", "Result", nil, "received nil response and error") - } - if err == nil && csk.Response.Response.StatusCode != http.StatusNoContent { - csk, err = client.SetSharedKeyResponder(csk.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsSetSharedKeyFuture", "Result", csk.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -814,30 +721,7 @@ func (client VirtualNetworkGatewayConnectionsClient) UpdateTagsSender(req *http. 
var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewayConnectionsClient) (vngc VirtualNetworkGatewayConnection, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vngc.Response.Response, err = future.GetResult(sender) - if vngc.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && vngc.Response.Response.StatusCode != http.StatusNoContent { - vngc, err = client.UpdateTagsResponder(vngc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsUpdateTagsFuture", "Result", vngc.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualnetworkgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualnetworkgateways.go index 5dd926c4605..5104011b90e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualnetworkgateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualnetworkgateways.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
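The change repeated in every one of these generated clients is the same: the long inline closure that each Sender used to assign to future.Result (poll until done, fetch the terminal response with a retrying sender, then run the operation's Responder) is replaced by a reference to an unexported result method on the future type, presumably generated into the package's models.go (not shown in these hunks). Below is a minimal sketch of the assumed shape, reconstructed from the removed closures rather than copied from the SDK, using the VirtualNetworkGateways CreateOrUpdate future as the example; it reuses the package's generated client and model types and the go-autorest imports already present in these files.

package network

import (
	"context"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

// Assumed shape of a payload-returning future after the refactor: the future
// embeds azure.FutureAPI, and Result stays a settable function field that the
// Sender now points at the generated result method instead of a closure.
type VirtualNetworkGatewaysCreateOrUpdateFuture struct {
	azure.FutureAPI
	Result func(VirtualNetworkGatewaysClient) (VirtualNetworkGateway, error)
}

// result carries the logic that was previously an inline closure in
// CreateOrUpdateSender: poll for completion, then fetch and unmarshal the
// terminal response.
func (future *VirtualNetworkGatewaysCreateOrUpdateFuture) result(client VirtualNetworkGatewaysClient) (vng VirtualNetworkGateway, err error) {
	var done bool
	done, err = future.DoneWithContext(context.Background(), client)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
		return
	}
	if !done {
		err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysCreateOrUpdateFuture")
		return
	}
	// Retry transient failures while fetching the final GET of the resource.
	sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
	vng.Response.Response, err = future.GetResult(sender)
	if vng.Response.Response == nil && err == nil {
		err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysCreateOrUpdateFuture", "Result", nil, "received nil response and error")
	}
	if err == nil && vng.Response.Response.StatusCode != http.StatusNoContent {
		vng, err = client.CreateOrUpdateResponder(vng.Response.Response)
		if err != nil {
			err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysCreateOrUpdateFuture", "Result", vng.Response.Response, "Failure responding to request")
		}
	}
	return
}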
@@ -114,30 +103,7 @@ func (client VirtualNetworkGatewaysClient) CreateOrUpdateSender(req *http.Reques var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (vng VirtualNetworkGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vng.Response.Response, err = future.GetResult(sender) - if vng.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vng.Response.Response.StatusCode != http.StatusNoContent { - vng, err = client.CreateOrUpdateResponder(vng.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysCreateOrUpdateFuture", "Result", vng.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -215,20 +181,7 @@ func (client VirtualNetworkGatewaysClient) DeleteSender(req *http.Request) (futu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -309,30 +262,7 @@ func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackageSender(req *h var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (s String, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGeneratevpnclientpackageFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGeneratevpnclientpackageFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - s.Response.Response, err = future.GetResult(sender) - if s.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGeneratevpnclientpackageFuture", "Result", nil, "received nil response and error") - } - if err == nil && s.Response.Response.StatusCode != http.StatusNoContent { - s, err = client.GeneratevpnclientpackageResponder(s.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGeneratevpnclientpackageFuture", 
"Result", s.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -414,30 +344,7 @@ func (client VirtualNetworkGatewaysClient) GenerateVpnProfileSender(req *http.Re var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (s String, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGenerateVpnProfileFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGenerateVpnProfileFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - s.Response.Response, err = future.GetResult(sender) - if s.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGenerateVpnProfileFuture", "Result", nil, "received nil response and error") - } - if err == nil && s.Response.Response.StatusCode != http.StatusNoContent { - s, err = client.GenerateVpnProfileResponder(s.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGenerateVpnProfileFuture", "Result", s.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -594,30 +501,7 @@ func (client VirtualNetworkGatewaysClient) GetAdvertisedRoutesSender(req *http.R var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (grlr GatewayRouteListResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetAdvertisedRoutesFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetAdvertisedRoutesFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - grlr.Response.Response, err = future.GetResult(sender) - if grlr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetAdvertisedRoutesFuture", "Result", nil, "received nil response and error") - } - if err == nil && grlr.Response.Response.StatusCode != http.StatusNoContent { - grlr, err = client.GetAdvertisedRoutesResponder(grlr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetAdvertisedRoutesFuture", "Result", grlr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -699,30 +583,7 @@ func (client VirtualNetworkGatewaysClient) GetBgpPeerStatusSender(req *http.Requ var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (bpslr BgpPeerStatusListResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, 
"network.VirtualNetworkGatewaysGetBgpPeerStatusFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetBgpPeerStatusFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - bpslr.Response.Response, err = future.GetResult(sender) - if bpslr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetBgpPeerStatusFuture", "Result", nil, "received nil response and error") - } - if err == nil && bpslr.Response.Response.StatusCode != http.StatusNoContent { - bpslr, err = client.GetBgpPeerStatusResponder(bpslr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetBgpPeerStatusFuture", "Result", bpslr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -801,30 +662,7 @@ func (client VirtualNetworkGatewaysClient) GetLearnedRoutesSender(req *http.Requ var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (grlr GatewayRouteListResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetLearnedRoutesFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetLearnedRoutesFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - grlr.Response.Response, err = future.GetResult(sender) - if grlr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetLearnedRoutesFuture", "Result", nil, "received nil response and error") - } - if err == nil && grlr.Response.Response.StatusCode != http.StatusNoContent { - grlr, err = client.GetLearnedRoutesResponder(grlr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetLearnedRoutesFuture", "Result", grlr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -904,30 +742,7 @@ func (client VirtualNetworkGatewaysClient) GetVpnclientIpsecParametersSender(req var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (vcipp VpnClientIPsecParameters, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vcipp.Response.Response, err = future.GetResult(sender) - if vcipp.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, 
"network.VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture", "Result", nil, "received nil response and error") - } - if err == nil && vcipp.Response.Response.StatusCode != http.StatusNoContent { - vcipp, err = client.GetVpnclientIpsecParametersResponder(vcipp.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture", "Result", vcipp.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1006,30 +821,7 @@ func (client VirtualNetworkGatewaysClient) GetVpnProfilePackageURLSender(req *ht var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (s String, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetVpnProfilePackageURLFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetVpnProfilePackageURLFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - s.Response.Response, err = future.GetResult(sender) - if s.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetVpnProfilePackageURLFuture", "Result", nil, "received nil response and error") - } - if err == nil && s.Response.Response.StatusCode != http.StatusNoContent { - s, err = client.GetVpnProfilePackageURLResponder(s.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetVpnProfilePackageURLFuture", "Result", s.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1346,30 +1138,7 @@ func (client VirtualNetworkGatewaysClient) ResetSender(req *http.Request) (futur var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (vng VirtualNetworkGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysResetFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysResetFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vng.Response.Response, err = future.GetResult(sender) - if vng.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysResetFuture", "Result", nil, "received nil response and error") - } - if err == nil && vng.Response.Response.StatusCode != http.StatusNoContent { - vng, err = client.ResetResponder(vng.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysResetFuture", "Result", vng.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1448,20 +1217,7 @@ func (client VirtualNetworkGatewaysClient) ResetVpnClientSharedKeySender(req *ht var azf 
azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysResetVpnClientSharedKeyFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysResetVpnClientSharedKeyFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1550,30 +1306,7 @@ func (client VirtualNetworkGatewaysClient) SetVpnclientIpsecParametersSender(req var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (vcipp VpnClientIPsecParameters, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vcipp.Response.Response, err = future.GetResult(sender) - if vcipp.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture", "Result", nil, "received nil response and error") - } - if err == nil && vcipp.Response.Response.StatusCode != http.StatusNoContent { - vcipp, err = client.SetVpnclientIpsecParametersResponder(vcipp.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture", "Result", vcipp.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1730,30 +1463,7 @@ func (client VirtualNetworkGatewaysClient) UpdateTagsSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (vng VirtualNetworkGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vng.Response.Response, err = future.GetResult(sender) - if vng.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && vng.Response.Response.StatusCode != http.StatusNoContent { - vng, err = client.UpdateTagsResponder(vng.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysUpdateTagsFuture", "Result", vng.Response.Response, 
"Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualnetworkpeerings.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualnetworkpeerings.go index 437945d948d..8614162f1c3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualnetworkpeerings.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualnetworkpeerings.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -110,30 +99,7 @@ func (client VirtualNetworkPeeringsClient) CreateOrUpdateSender(req *http.Reques var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkPeeringsClient) (vnp VirtualNetworkPeering, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkPeeringsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vnp.Response.Response, err = future.GetResult(sender) - if vnp.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vnp.Response.Response.StatusCode != http.StatusNoContent { - vnp, err = client.CreateOrUpdateResponder(vnp.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsCreateOrUpdateFuture", "Result", vnp.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -213,20 +179,7 @@ func (client VirtualNetworkPeeringsClient) DeleteSender(req *http.Request) (futu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkPeeringsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsDeleteFuture", "Result", future.Response(), "Polling failure") - 
return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkPeeringsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualnetworks.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualnetworks.go index 0a65f8482d4..f9e0cd5e93a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualnetworks.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualnetworks.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -184,30 +173,7 @@ func (client VirtualNetworksClient) CreateOrUpdateSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworksClient) (vn VirtualNetwork, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworksCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworksCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vn.Response.Response, err = future.GetResult(sender) - if vn.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworksCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vn.Response.Response.StatusCode != http.StatusNoContent { - vn, err = client.CreateOrUpdateResponder(vn.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworksCreateOrUpdateFuture", "Result", vn.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -285,20 +251,7 @@ func (client VirtualNetworksClient) DeleteSender(req *http.Request) (future Virt var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworksClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworksDeleteFuture", "Result", future.Response(), 
"Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworksDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -805,30 +758,7 @@ func (client VirtualNetworksClient) UpdateTagsSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworksClient) (vn VirtualNetwork, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworksUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworksUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vn.Response.Response, err = future.GetResult(sender) - if vn.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworksUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && vn.Response.Response.StatusCode != http.StatusNoContent { - vn, err = client.UpdateTagsResponder(vn.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworksUpdateTagsFuture", "Result", vn.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualnetworktaps.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualnetworktaps.go index 031d5faca94..a75afb85d3c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualnetworktaps.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualnetworktaps.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -139,30 +128,7 @@ func (client VirtualNetworkTapsClient) CreateOrUpdateSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkTapsClient) (vnt VirtualNetworkTap, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkTapsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vnt.Response.Response, err = future.GetResult(sender) - if vnt.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vnt.Response.Response.StatusCode != http.StatusNoContent { - vnt, err = client.CreateOrUpdateResponder(vnt.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsCreateOrUpdateFuture", "Result", vnt.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -240,20 +206,7 @@ func (client VirtualNetworkTapsClient) DeleteSender(req *http.Request) (future V var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkTapsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkTapsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -638,30 +591,7 @@ func (client VirtualNetworkTapsClient) UpdateTagsSender(req *http.Request) (futu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkTapsClient) (vnt VirtualNetworkTap, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkTapsUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vnt.Response.Response, err = future.GetResult(sender) - if vnt.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && vnt.Response.Response.StatusCode != http.StatusNoContent { - vnt, err = client.UpdateTagsResponder(vnt.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsUpdateTagsFuture", "Result", vnt.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = 
future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualwans.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualwans.go index 93d4684589a..749bd92ef97 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualwans.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/virtualwans.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -107,30 +96,7 @@ func (client VirtualWansClient) CreateOrUpdateSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualWansClient) (vw VirtualWAN, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualWansCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualWansCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vw.Response.Response, err = future.GetResult(sender) - if vw.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualWansCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vw.Response.Response.StatusCode != http.StatusNoContent { - vw, err = client.CreateOrUpdateResponder(vw.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualWansCreateOrUpdateFuture", "Result", vw.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -208,20 +174,7 @@ func (client VirtualWansClient) DeleteSender(req *http.Request) (future VirtualW var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualWansClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualWansDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualWansDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -606,30 
+559,7 @@ func (client VirtualWansClient) UpdateTagsSender(req *http.Request) (future Virt var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualWansClient) (vw VirtualWAN, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualWansUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualWansUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vw.Response.Response, err = future.GetResult(sender) - if vw.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualWansUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && vw.Response.Response.StatusCode != http.StatusNoContent { - vw, err = client.UpdateTagsResponder(vw.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualWansUpdateTagsFuture", "Result", vw.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/vpnconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/vpnconnections.go index a27b903ff02..a3f86e1d9d2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/vpnconnections.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/vpnconnections.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -110,30 +99,7 @@ func (client VpnConnectionsClient) CreateOrUpdateSender(req *http.Request) (futu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VpnConnectionsClient) (vc VpnConnection, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnConnectionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VpnConnectionsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vc.Response.Response, err = future.GetResult(sender) - if vc.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VpnConnectionsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vc.Response.Response.StatusCode != http.StatusNoContent { - vc, err = client.CreateOrUpdateResponder(vc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnConnectionsCreateOrUpdateFuture", "Result", vc.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -213,20 +179,7 @@ func (client VpnConnectionsClient) DeleteSender(req *http.Request) (future VpnCo var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VpnConnectionsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnConnectionsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VpnConnectionsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/vpngateways.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/vpngateways.go index b4f6740b93e..21f025d84b0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/vpngateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/vpngateways.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -107,30 +96,7 @@ func (client VpnGatewaysClient) CreateOrUpdateSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VpnGatewaysClient) (vg VpnGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VpnGatewaysCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vg.Response.Response, err = future.GetResult(sender) - if vg.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VpnGatewaysCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vg.Response.Response.StatusCode != http.StatusNoContent { - vg, err = client.CreateOrUpdateResponder(vg.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnGatewaysCreateOrUpdateFuture", "Result", vg.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -208,20 +174,7 @@ func (client VpnGatewaysClient) DeleteSender(req *http.Request) (future VpnGatew var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VpnGatewaysClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VpnGatewaysDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -606,30 +559,7 @@ func (client VpnGatewaysClient) UpdateTagsSender(req *http.Request) (future VpnG var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VpnGatewaysClient) (vg VpnGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnGatewaysUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VpnGatewaysUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vg.Response.Response, err = future.GetResult(sender) - if vg.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VpnGatewaysUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && vg.Response.Response.StatusCode != http.StatusNoContent { - vg, err = client.UpdateTagsResponder(vg.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnGatewaysUpdateTagsFuture", "Result", vg.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/vpnsites.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/vpnsites.go index 841fadefbd9..9281ac308fd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/vpnsites.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/vpnsites.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -107,30 +96,7 @@ func (client VpnSitesClient) CreateOrUpdateSender(req *http.Request) (future Vpn var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VpnSitesClient) (vs VpnSite, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnSitesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VpnSitesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vs.Response.Response, err = future.GetResult(sender) - if vs.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VpnSitesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vs.Response.Response.StatusCode != http.StatusNoContent { - vs, err = client.CreateOrUpdateResponder(vs.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnSitesCreateOrUpdateFuture", "Result", vs.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -208,20 +174,7 @@ func (client VpnSitesClient) DeleteSender(req *http.Request) (future VpnSitesDel var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VpnSitesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnSitesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VpnSitesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -606,30 +559,7 @@ func (client VpnSitesClient) UpdateTagsSender(req *http.Request) 
(future VpnSite var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VpnSitesClient) (vs VpnSite, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnSitesUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VpnSitesUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vs.Response.Response, err = future.GetResult(sender) - if vs.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VpnSitesUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && vs.Response.Response.StatusCode != http.StatusNoContent { - vs, err = client.UpdateTagsResponder(vs.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnSitesUpdateTagsFuture", "Result", vs.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/vpnsitesconfiguration.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/vpnsitesconfiguration.go index 9a9f8ee7093..bbf6f4b10eb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/vpnsitesconfiguration.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/vpnsitesconfiguration.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -107,20 +96,7 @@ func (client VpnSitesConfigurationClient) DownloadSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VpnSitesConfigurationClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnSitesConfigurationDownloadFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VpnSitesConfigurationDownloadFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/watchers.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/watchers.go index d89be7efc84..6d4d6714c7a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/watchers.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/watchers.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -116,30 +105,7 @@ func (client WatchersClient) CheckConnectivitySender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WatchersClient) (ci ConnectivityInformation, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersCheckConnectivityFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.WatchersCheckConnectivityFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ci.Response.Response, err = future.GetResult(sender) - if ci.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.WatchersCheckConnectivityFuture", "Result", nil, "received nil response and error") - } - if err == nil && ci.Response.Response.StatusCode != http.StatusNoContent { - ci, err = client.CheckConnectivityResponder(ci.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersCheckConnectivityFuture", "Result", ci.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -296,20 +262,7 @@ func (client WatchersClient) DeleteSender(req *http.Request) (future WatchersDel var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WatchersClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.WatchersDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -475,30 +428,7 @@ func (client WatchersClient) GetAzureReachabilityReportSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WatchersClient) (arr AzureReachabilityReport, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetAzureReachabilityReportFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.WatchersGetAzureReachabilityReportFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - arr.Response.Response, err = future.GetResult(sender) - if arr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetAzureReachabilityReportFuture", "Result", nil, "received nil response and error") - } - if err == nil && arr.Response.Response.StatusCode != http.StatusNoContent { - arr, err = client.GetAzureReachabilityReportResponder(arr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetAzureReachabilityReportFuture", "Result", arr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ 
-585,30 +515,7 @@ func (client WatchersClient) GetFlowLogStatusSender(req *http.Request) (future W var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WatchersClient) (fli FlowLogInformation, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetFlowLogStatusFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.WatchersGetFlowLogStatusFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - fli.Response.Response, err = future.GetResult(sender) - if fli.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetFlowLogStatusFuture", "Result", nil, "received nil response and error") - } - if err == nil && fli.Response.Response.StatusCode != http.StatusNoContent { - fli, err = client.GetFlowLogStatusResponder(fli.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetFlowLogStatusFuture", "Result", fli.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -696,30 +603,7 @@ func (client WatchersClient) GetNetworkConfigurationDiagnosticSender(req *http.R var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WatchersClient) (cdr ConfigurationDiagnosticResponse, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetNetworkConfigurationDiagnosticFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.WatchersGetNetworkConfigurationDiagnosticFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - cdr.Response.Response, err = future.GetResult(sender) - if cdr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetNetworkConfigurationDiagnosticFuture", "Result", nil, "received nil response and error") - } - if err == nil && cdr.Response.Response.StatusCode != http.StatusNoContent { - cdr, err = client.GetNetworkConfigurationDiagnosticResponder(cdr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetNetworkConfigurationDiagnosticFuture", "Result", cdr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -808,30 +692,7 @@ func (client WatchersClient) GetNextHopSender(req *http.Request) (future Watcher var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WatchersClient) (nhr NextHopResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetNextHopFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.WatchersGetNextHopFuture") - return - } - sender := autorest.DecorateSender(client, 
autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - nhr.Response.Response, err = future.GetResult(sender) - if nhr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetNextHopFuture", "Result", nil, "received nil response and error") - } - if err == nil && nhr.Response.Response.StatusCode != http.StatusNoContent { - nhr, err = client.GetNextHopResponder(nhr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetNextHopFuture", "Result", nhr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1001,30 +862,7 @@ func (client WatchersClient) GetTroubleshootingSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WatchersClient) (tr TroubleshootingResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetTroubleshootingFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.WatchersGetTroubleshootingFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - tr.Response.Response, err = future.GetResult(sender) - if tr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetTroubleshootingFuture", "Result", nil, "received nil response and error") - } - if err == nil && tr.Response.Response.StatusCode != http.StatusNoContent { - tr, err = client.GetTroubleshootingResponder(tr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetTroubleshootingFuture", "Result", tr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1111,30 +949,7 @@ func (client WatchersClient) GetTroubleshootingResultSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WatchersClient) (tr TroubleshootingResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetTroubleshootingResultFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.WatchersGetTroubleshootingResultFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - tr.Response.Response, err = future.GetResult(sender) - if tr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetTroubleshootingResultFuture", "Result", nil, "received nil response and error") - } - if err == nil && tr.Response.Response.StatusCode != http.StatusNoContent { - tr, err = client.GetTroubleshootingResultResponder(tr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetTroubleshootingResultFuture", "Result", tr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ 
-1221,30 +1036,7 @@ func (client WatchersClient) GetVMSecurityRulesSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WatchersClient) (sgvr SecurityGroupViewResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetVMSecurityRulesFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.WatchersGetVMSecurityRulesFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - sgvr.Response.Response, err = future.GetResult(sender) - if sgvr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetVMSecurityRulesFuture", "Result", nil, "received nil response and error") - } - if err == nil && sgvr.Response.Response.StatusCode != http.StatusNoContent { - sgvr, err = client.GetVMSecurityRulesResponder(sgvr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetVMSecurityRulesFuture", "Result", sgvr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1470,30 +1262,7 @@ func (client WatchersClient) ListAvailableProvidersSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WatchersClient) (apl AvailableProvidersList, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersListAvailableProvidersFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.WatchersListAvailableProvidersFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - apl.Response.Response, err = future.GetResult(sender) - if apl.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.WatchersListAvailableProvidersFuture", "Result", nil, "received nil response and error") - } - if err == nil && apl.Response.Response.StatusCode != http.StatusNoContent { - apl, err = client.ListAvailableProvidersResponder(apl.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersListAvailableProvidersFuture", "Result", apl.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1592,30 +1361,7 @@ func (client WatchersClient) SetFlowLogConfigurationSender(req *http.Request) (f var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WatchersClient) (fli FlowLogInformation, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersSetFlowLogConfigurationFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.WatchersSetFlowLogConfigurationFuture") - return - } - sender := autorest.DecorateSender(client, 
autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - fli.Response.Response, err = future.GetResult(sender) - if fli.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.WatchersSetFlowLogConfigurationFuture", "Result", nil, "received nil response and error") - } - if err == nil && fli.Response.Response.StatusCode != http.StatusNoContent { - fli, err = client.SetFlowLogConfigurationResponder(fli.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersSetFlowLogConfigurationFuture", "Result", fli.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1785,30 +1531,7 @@ func (client WatchersClient) VerifyIPFlowSender(req *http.Request) (future Watch var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WatchersClient) (vifr VerificationIPFlowResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersVerifyIPFlowFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.WatchersVerifyIPFlowFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vifr.Response.Response, err = future.GetResult(sender) - if vifr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.WatchersVerifyIPFlowFuture", "Result", nil, "received nil response and error") - } - if err == nil && vifr.Response.Response.StatusCode != http.StatusNoContent { - vifr, err = client.VerifyIPFlowResponder(vifr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersVerifyIPFlowFuture", "Result", vifr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/webapplicationfirewallpolicies.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/webapplicationfirewallpolicies.go index 5b753cf22f4..80d4e22526f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/webapplicationfirewallpolicies.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/webapplicationfirewallpolicies.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
// // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -196,20 +185,7 @@ func (client WebApplicationFirewallPoliciesClient) DeleteSender(req *http.Reques var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WebApplicationFirewallPoliciesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WebApplicationFirewallPoliciesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.WebApplicationFirewallPoliciesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/CHANGELOG.md index f3d57dda5a4..5a852aedd15 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/CHANGELOG.md @@ -1,5 +1,193 @@ Generated from https://github.com/Azure/azure-rest-api-specs/tree/3c764635e7d442b3e74caf593029fcd440b3ef82//specification/network/resource-manager/readme.md tag: `package-2019-07` -Code generator @microsoft.azure/autorest.go@2.1.175 +Code generator @microsoft.azure/autorest.go@2.1.178 +### New Funcs + +1. *ApplicationGatewaysBackendHealthFuture.UnmarshalJSON([]byte) error +1. *ApplicationGatewaysBackendHealthOnDemandFuture.UnmarshalJSON([]byte) error +1. *ApplicationGatewaysCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ApplicationGatewaysDeleteFuture.UnmarshalJSON([]byte) error +1. *ApplicationGatewaysStartFuture.UnmarshalJSON([]byte) error +1. *ApplicationGatewaysStopFuture.UnmarshalJSON([]byte) error +1. *ApplicationGatewaysUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *ApplicationSecurityGroupsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ApplicationSecurityGroupsDeleteFuture.UnmarshalJSON([]byte) error +1. *ApplicationSecurityGroupsUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *AzureFirewallsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *AzureFirewallsDeleteFuture.UnmarshalJSON([]byte) error +1. *BastionHostsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *BastionHostsDeleteFuture.UnmarshalJSON([]byte) error +1. *ConnectionMonitorsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ConnectionMonitorsDeleteFuture.UnmarshalJSON([]byte) error +1. *ConnectionMonitorsQueryFuture.UnmarshalJSON([]byte) error +1. *ConnectionMonitorsStartFuture.UnmarshalJSON([]byte) error +1. *ConnectionMonitorsStopFuture.UnmarshalJSON([]byte) error +1. *DdosCustomPoliciesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *DdosCustomPoliciesDeleteFuture.UnmarshalJSON([]byte) error +1. *DdosCustomPoliciesUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *DdosProtectionPlansCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *DdosProtectionPlansDeleteFuture.UnmarshalJSON([]byte) error +1. *DdosProtectionPlansUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCircuitAuthorizationsDeleteFuture.UnmarshalJSON([]byte) error +1. 
*ExpressRouteCircuitConnectionsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCircuitConnectionsDeleteFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCircuitPeeringsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCircuitPeeringsDeleteFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCircuitsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCircuitsDeleteFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCircuitsListArpTableFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCircuitsListRoutesTableFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCircuitsListRoutesTableSummaryFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCircuitsUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteConnectionsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteConnectionsDeleteFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCrossConnectionPeeringsDeleteFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCrossConnectionsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCrossConnectionsListArpTableFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCrossConnectionsListRoutesTableFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCrossConnectionsListRoutesTableSummaryFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteCrossConnectionsUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteGatewaysCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ExpressRouteGatewaysDeleteFuture.UnmarshalJSON([]byte) error +1. *ExpressRoutePortsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ExpressRoutePortsDeleteFuture.UnmarshalJSON([]byte) error +1. *ExpressRoutePortsUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *FirewallPoliciesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *FirewallPoliciesDeleteFuture.UnmarshalJSON([]byte) error +1. *FirewallPolicyRuleGroupsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *FirewallPolicyRuleGroupsDeleteFuture.UnmarshalJSON([]byte) error +1. *InboundNatRulesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *InboundNatRulesDeleteFuture.UnmarshalJSON([]byte) error +1. *InterfaceTapConfigurationsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *InterfaceTapConfigurationsDeleteFuture.UnmarshalJSON([]byte) error +1. *InterfacesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *InterfacesDeleteFuture.UnmarshalJSON([]byte) error +1. *InterfacesGetEffectiveRouteTableFuture.UnmarshalJSON([]byte) error +1. *InterfacesListEffectiveNetworkSecurityGroupsFuture.UnmarshalJSON([]byte) error +1. *InterfacesUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *LoadBalancersCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *LoadBalancersDeleteFuture.UnmarshalJSON([]byte) error +1. *LoadBalancersUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *LocalNetworkGatewaysCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *LocalNetworkGatewaysDeleteFuture.UnmarshalJSON([]byte) error +1. *LocalNetworkGatewaysUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *NatGatewaysCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *NatGatewaysDeleteFuture.UnmarshalJSON([]byte) error +1. *P2sVpnGatewaysCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *P2sVpnGatewaysDeleteFuture.UnmarshalJSON([]byte) error +1. *P2sVpnGatewaysGenerateVpnProfileFuture.UnmarshalJSON([]byte) error +1. *P2sVpnGatewaysGetP2sVpnConnectionHealthFuture.UnmarshalJSON([]byte) error +1. 
*P2sVpnGatewaysUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *P2sVpnServerConfigurationsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *P2sVpnServerConfigurationsDeleteFuture.UnmarshalJSON([]byte) error +1. *PacketCapturesCreateFuture.UnmarshalJSON([]byte) error +1. *PacketCapturesDeleteFuture.UnmarshalJSON([]byte) error +1. *PacketCapturesGetStatusFuture.UnmarshalJSON([]byte) error +1. *PacketCapturesStopFuture.UnmarshalJSON([]byte) error +1. *PrivateEndpointsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *PrivateEndpointsDeleteFuture.UnmarshalJSON([]byte) error +1. *PrivateLinkServicesCheckPrivateLinkServiceVisibilityByResourceGroupFuture.UnmarshalJSON([]byte) error +1. *PrivateLinkServicesCheckPrivateLinkServiceVisibilityFuture.UnmarshalJSON([]byte) error +1. *PrivateLinkServicesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *PrivateLinkServicesDeleteFuture.UnmarshalJSON([]byte) error +1. *PrivateLinkServicesDeletePrivateEndpointConnectionFuture.UnmarshalJSON([]byte) error +1. *ProfilesDeleteFuture.UnmarshalJSON([]byte) error +1. *PublicIPAddressesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *PublicIPAddressesDeleteFuture.UnmarshalJSON([]byte) error +1. *PublicIPAddressesUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *PublicIPPrefixesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *PublicIPPrefixesDeleteFuture.UnmarshalJSON([]byte) error +1. *PublicIPPrefixesUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *RouteFilterRulesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *RouteFilterRulesDeleteFuture.UnmarshalJSON([]byte) error +1. *RouteFilterRulesUpdateFuture.UnmarshalJSON([]byte) error +1. *RouteFiltersCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *RouteFiltersDeleteFuture.UnmarshalJSON([]byte) error +1. *RouteFiltersUpdateFuture.UnmarshalJSON([]byte) error +1. *RouteTablesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *RouteTablesDeleteFuture.UnmarshalJSON([]byte) error +1. *RouteTablesUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *RoutesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *RoutesDeleteFuture.UnmarshalJSON([]byte) error +1. *SecurityGroupsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *SecurityGroupsDeleteFuture.UnmarshalJSON([]byte) error +1. *SecurityGroupsUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *SecurityRulesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *SecurityRulesDeleteFuture.UnmarshalJSON([]byte) error +1. *ServiceEndpointPoliciesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ServiceEndpointPoliciesDeleteFuture.UnmarshalJSON([]byte) error +1. *ServiceEndpointPoliciesUpdateFuture.UnmarshalJSON([]byte) error +1. *ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ServiceEndpointPolicyDefinitionsDeleteFuture.UnmarshalJSON([]byte) error +1. *SubnetsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *SubnetsDeleteFuture.UnmarshalJSON([]byte) error +1. *SubnetsPrepareNetworkPoliciesFuture.UnmarshalJSON([]byte) error +1. *SubnetsUnprepareNetworkPoliciesFuture.UnmarshalJSON([]byte) error +1. *VirtualHubsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualHubsDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualHubsUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewayConnectionsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewayConnectionsDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewayConnectionsResetSharedKeyFuture.UnmarshalJSON([]byte) error +1. 
*VirtualNetworkGatewayConnectionsSetSharedKeyFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewayConnectionsStartPacketCaptureFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewayConnectionsStopPacketCaptureFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewayConnectionsUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysGenerateVpnProfileFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysGeneratevpnclientpackageFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysGetAdvertisedRoutesFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysGetBgpPeerStatusFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysGetLearnedRoutesFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysGetVpnProfilePackageURLFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysGetVpnclientConnectionHealthFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysResetFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysResetVpnClientSharedKeyFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysStartPacketCaptureFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysStopPacketCaptureFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkGatewaysUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkPeeringsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkPeeringsDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkTapsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkTapsDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkTapsUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworksCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworksDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworksUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *VirtualRouterPeeringsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualRouterPeeringsDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualRoutersCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualRoutersDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualWansCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualWansDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualWansUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *VpnConnectionsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VpnConnectionsDeleteFuture.UnmarshalJSON([]byte) error +1. *VpnGatewaysCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VpnGatewaysDeleteFuture.UnmarshalJSON([]byte) error +1. *VpnGatewaysResetFuture.UnmarshalJSON([]byte) error +1. *VpnGatewaysUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *VpnSitesConfigurationDownloadFuture.UnmarshalJSON([]byte) error +1. *VpnSitesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VpnSitesDeleteFuture.UnmarshalJSON([]byte) error +1. *VpnSitesUpdateTagsFuture.UnmarshalJSON([]byte) error +1. *WatchersCheckConnectivityFuture.UnmarshalJSON([]byte) error +1. *WatchersDeleteFuture.UnmarshalJSON([]byte) error +1. *WatchersGetAzureReachabilityReportFuture.UnmarshalJSON([]byte) error +1. *WatchersGetFlowLogStatusFuture.UnmarshalJSON([]byte) error +1. 
*WatchersGetNetworkConfigurationDiagnosticFuture.UnmarshalJSON([]byte) error +1. *WatchersGetNextHopFuture.UnmarshalJSON([]byte) error +1. *WatchersGetTroubleshootingFuture.UnmarshalJSON([]byte) error +1. *WatchersGetTroubleshootingResultFuture.UnmarshalJSON([]byte) error +1. *WatchersGetVMSecurityRulesFuture.UnmarshalJSON([]byte) error +1. *WatchersListAvailableProvidersFuture.UnmarshalJSON([]byte) error +1. *WatchersSetFlowLogConfigurationFuture.UnmarshalJSON([]byte) error +1. *WatchersVerifyIPFlowFuture.UnmarshalJSON([]byte) error +1. *WebApplicationFirewallPoliciesDeleteFuture.UnmarshalJSON([]byte) error diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/_meta.json new file mode 100644 index 00000000000..9d1a8737ac1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/_meta.json @@ -0,0 +1,10 @@ +{ + "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", + "readme": "/_/azure-rest-api-specs/specification/network/resource-manager/readme.md", + "tag": "package-2019-07", + "use": "@microsoft.azure/autorest.go@2.1.178", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.178 --go --verbose --go-sdk-folder=/_/azure-sdk-for-go --tag=package-2019-07 --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/network/resource-manager/readme.md", + "additional_properties": { + "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION" + } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/applicationgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/applicationgateways.go index 4b3545188e6..ffe915829ad 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/applicationgateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/applicationgateways.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -109,30 +98,7 @@ func (client ApplicationGatewaysClient) BackendHealthSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ApplicationGatewaysClient) (agbh ApplicationGatewayBackendHealth, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysBackendHealthFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysBackendHealthFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - agbh.Response.Response, err = future.GetResult(sender) - if agbh.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysBackendHealthFuture", "Result", nil, "received nil response and error") - } - if err == nil && agbh.Response.Response.StatusCode != http.StatusNoContent { - agbh, err = client.BackendHealthResponder(agbh.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysBackendHealthFuture", "Result", agbh.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -218,30 +184,7 @@ func (client ApplicationGatewaysClient) BackendHealthOnDemandSender(req *http.Re var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ApplicationGatewaysClient) (agbhod ApplicationGatewayBackendHealthOnDemand, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysBackendHealthOnDemandFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysBackendHealthOnDemandFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - agbhod.Response.Response, err = future.GetResult(sender) - if agbhod.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysBackendHealthOnDemandFuture", "Result", nil, "received nil response and error") - } - if err == nil && agbhod.Response.Response.StatusCode != http.StatusNoContent { - agbhod, err = client.BackendHealthOnDemandResponder(agbhod.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysBackendHealthOnDemandFuture", "Result", agbhod.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -350,30 +293,7 @@ func (client ApplicationGatewaysClient) CreateOrUpdateSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ApplicationGatewaysClient) (ag ApplicationGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = 
azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ag.Response.Response, err = future.GetResult(sender) - if ag.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && ag.Response.Response.StatusCode != http.StatusNoContent { - ag, err = client.CreateOrUpdateResponder(ag.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysCreateOrUpdateFuture", "Result", ag.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -451,20 +371,7 @@ func (client ApplicationGatewaysClient) DeleteSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ApplicationGatewaysClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1388,20 +1295,7 @@ func (client ApplicationGatewaysClient) StartSender(req *http.Request) (future A var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ApplicationGatewaysClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysStartFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysStartFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1478,20 +1372,7 @@ func (client ApplicationGatewaysClient) StopSender(req *http.Request) (future Ap var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ApplicationGatewaysClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysStopFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysStopFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1571,30 +1452,7 @@ func (client ApplicationGatewaysClient) UpdateTagsSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ApplicationGatewaysClient) (ag ApplicationGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysUpdateTagsFuture", "Result", 
future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ag.Response.Response, err = future.GetResult(sender) - if ag.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && ag.Response.Response.StatusCode != http.StatusNoContent { - ag, err = client.UpdateTagsResponder(ag.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysUpdateTagsFuture", "Result", ag.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/applicationsecuritygroups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/applicationsecuritygroups.go index 2632807db5e..08f19bd85dc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/applicationsecuritygroups.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/applicationsecuritygroups.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -108,30 +97,7 @@ func (client ApplicationSecurityGroupsClient) CreateOrUpdateSender(req *http.Req var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ApplicationSecurityGroupsClient) (asg ApplicationSecurityGroup, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ApplicationSecurityGroupsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - asg.Response.Response, err = future.GetResult(sender) - if asg.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && asg.Response.Response.StatusCode != http.StatusNoContent { - asg, err = client.CreateOrUpdateResponder(asg.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsCreateOrUpdateFuture", "Result", asg.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -209,20 +175,7 @@ func (client ApplicationSecurityGroupsClient) DeleteSender(req *http.Request) (f var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ApplicationSecurityGroupsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ApplicationSecurityGroupsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -607,30 +560,7 @@ func (client ApplicationSecurityGroupsClient) UpdateTagsSender(req *http.Request var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ApplicationSecurityGroupsClient) (asg ApplicationSecurityGroup, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ApplicationSecurityGroupsUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - asg.Response.Response, err = future.GetResult(sender) - if asg.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && asg.Response.Response.StatusCode != http.StatusNoContent { - asg, err = client.UpdateTagsResponder(asg.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsUpdateTagsFuture", 
"Result", asg.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/availabledelegations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/availabledelegations.go index 777d94ef707..d394813dfc7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/availabledelegations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/availabledelegations.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/availableendpointservices.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/availableendpointservices.go index a986101b2c2..da4d439010c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/availableendpointservices.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/availableendpointservices.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/availableprivateendpointtypes.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/availableprivateendpointtypes.go index 4561d496d35..a75cb0ff0c0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/availableprivateendpointtypes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/availableprivateendpointtypes.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/availableresourcegroupdelegations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/availableresourcegroupdelegations.go index 0c1df772f0b..f88e8419a2e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/availableresourcegroupdelegations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/availableresourcegroupdelegations.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
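For consumers such as ARO-RP the exported surface is unchanged, so the usual track-1 calling pattern should still apply. A hedged caller-side sketch, assuming the standard generated client constructors and an environment-based authorizer; the subscription ID, resource group, ASG name and location are illustrative placeholders, not values taken from this diff:

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network"
	"github.com/Azure/go-autorest/autorest/azure/auth"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Credentials are read from the environment (AZURE_CLIENT_ID, etc.);
	// all resource names below are made-up examples.
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		panic(err)
	}

	client := network.NewApplicationSecurityGroupsClient("<subscription-id>")
	client.Authorizer = authorizer

	ctx := context.Background()
	future, err := client.CreateOrUpdate(ctx, "example-rg", "example-asg",
		network.ApplicationSecurityGroup{Location: to.StringPtr("eastus")})
	if err != nil {
		panic(err)
	}

	// Poll to completion, then materialise the result. future.Result is the
	// exported field assigned in the hunks above; only its implementation
	// moved into the generated result method.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		panic(err)
	}
	asg, err := future.Result(client)
	if err != nil {
		panic(err)
	}
	fmt.Println(to.String(asg.Name))
}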
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/azurefirewallfqdntags.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/azurefirewallfqdntags.go index f76ae08b33e..529c14ed6aa 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/azurefirewallfqdntags.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/azurefirewallfqdntags.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/azurefirewalls.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/azurefirewalls.go index ad77309bb73..77a4937dd00 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/azurefirewalls.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/azurefirewalls.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -107,30 +96,7 @@ func (client AzureFirewallsClient) CreateOrUpdateSender(req *http.Request) (futu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client AzureFirewallsClient) (af AzureFirewall, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.AzureFirewallsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.AzureFirewallsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - af.Response.Response, err = future.GetResult(sender) - if af.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.AzureFirewallsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && af.Response.Response.StatusCode != http.StatusNoContent { - af, err = client.CreateOrUpdateResponder(af.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.AzureFirewallsCreateOrUpdateFuture", "Result", af.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -208,20 +174,7 @@ func (client AzureFirewallsClient) DeleteSender(req *http.Request) (future Azure var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client AzureFirewallsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.AzureFirewallsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.AzureFirewallsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/bastionhosts.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/bastionhosts.go index 1d56acb43e5..08f9377141d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/bastionhosts.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/bastionhosts.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -107,30 +96,7 @@ func (client BastionHostsClient) CreateOrUpdateSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client BastionHostsClient) (bh BastionHost, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.BastionHostsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.BastionHostsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - bh.Response.Response, err = future.GetResult(sender) - if bh.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.BastionHostsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && bh.Response.Response.StatusCode != http.StatusNoContent { - bh, err = client.CreateOrUpdateResponder(bh.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.BastionHostsCreateOrUpdateFuture", "Result", bh.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -208,20 +174,7 @@ func (client BastionHostsClient) DeleteSender(req *http.Request) (future Bastion var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client BastionHostsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.BastionHostsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.BastionHostsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/bgpservicecommunities.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/bgpservicecommunities.go index 8c0f28ceb54..02d566552d5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/bgpservicecommunities.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/bgpservicecommunities.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. 
// Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/client.go index 09a2c98fee1..b58cdccafc1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/client.go @@ -3,19 +3,8 @@ // Network Client package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/connectionmonitors.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/connectionmonitors.go index e842ee603e5..b13c2123545 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/connectionmonitors.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/connectionmonitors.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -120,30 +109,7 @@ func (client ConnectionMonitorsClient) CreateOrUpdateSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ConnectionMonitorsClient) (cmr ConnectionMonitorResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ConnectionMonitorsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - cmr.Response.Response, err = future.GetResult(sender) - if cmr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && cmr.Response.Response.StatusCode != http.StatusNoContent { - cmr, err = client.CreateOrUpdateResponder(cmr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsCreateOrUpdateFuture", "Result", cmr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -223,20 +189,7 @@ func (client ConnectionMonitorsClient) DeleteSender(req *http.Request) (future C var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ConnectionMonitorsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ConnectionMonitorsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -469,30 +422,7 @@ func (client ConnectionMonitorsClient) QuerySender(req *http.Request) (future Co var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ConnectionMonitorsClient) (cmqr ConnectionMonitorQueryResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsQueryFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ConnectionMonitorsQueryFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - cmqr.Response.Response, err = future.GetResult(sender) - if cmqr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsQueryFuture", "Result", nil, "received nil response and error") - } - if err == nil && cmqr.Response.Response.StatusCode != http.StatusNoContent { - cmqr, err = client.QueryResponder(cmqr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsQueryFuture", "Result", cmqr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = 
future.result return } @@ -572,20 +502,7 @@ func (client ConnectionMonitorsClient) StartSender(req *http.Request) (future Co var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ConnectionMonitorsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsStartFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ConnectionMonitorsStartFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -664,20 +581,7 @@ func (client ConnectionMonitorsClient) StopSender(req *http.Request) (future Con var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ConnectionMonitorsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsStopFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ConnectionMonitorsStopFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/ddoscustompolicies.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/ddoscustompolicies.go index 60753a037ca..c1c23291cce 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/ddoscustompolicies.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/ddoscustompolicies.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -108,30 +97,7 @@ func (client DdosCustomPoliciesClient) CreateOrUpdateSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DdosCustomPoliciesClient) (dcp DdosCustomPolicy, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.DdosCustomPoliciesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.DdosCustomPoliciesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - dcp.Response.Response, err = future.GetResult(sender) - if dcp.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.DdosCustomPoliciesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && dcp.Response.Response.StatusCode != http.StatusNoContent { - dcp, err = client.CreateOrUpdateResponder(dcp.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.DdosCustomPoliciesCreateOrUpdateFuture", "Result", dcp.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -209,20 +175,7 @@ func (client DdosCustomPoliciesClient) DeleteSender(req *http.Request) (future D var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DdosCustomPoliciesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.DdosCustomPoliciesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.DdosCustomPoliciesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -378,30 +331,7 @@ func (client DdosCustomPoliciesClient) UpdateTagsSender(req *http.Request) (futu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DdosCustomPoliciesClient) (dcp DdosCustomPolicy, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.DdosCustomPoliciesUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.DdosCustomPoliciesUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - dcp.Response.Response, err = future.GetResult(sender) - if dcp.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.DdosCustomPoliciesUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && dcp.Response.Response.StatusCode != http.StatusNoContent { - dcp, err = client.UpdateTagsResponder(dcp.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.DdosCustomPoliciesUpdateTagsFuture", "Result", dcp.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = 
future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/ddosprotectionplans.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/ddosprotectionplans.go index 4115938108b..17728685ae5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/ddosprotectionplans.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/ddosprotectionplans.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -111,30 +100,7 @@ func (client DdosProtectionPlansClient) CreateOrUpdateSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DdosProtectionPlansClient) (dpp DdosProtectionPlan, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.DdosProtectionPlansCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - dpp.Response.Response, err = future.GetResult(sender) - if dpp.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && dpp.Response.Response.StatusCode != http.StatusNoContent { - dpp, err = client.CreateOrUpdateResponder(dpp.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansCreateOrUpdateFuture", "Result", dpp.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -212,20 +178,7 @@ func (client DdosProtectionPlansClient) DeleteSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DdosProtectionPlansClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.DdosProtectionPlansDeleteFuture") - 
return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -610,30 +563,7 @@ func (client DdosProtectionPlansClient) UpdateTagsSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DdosProtectionPlansClient) (dpp DdosProtectionPlan, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.DdosProtectionPlansUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - dpp.Response.Response, err = future.GetResult(sender) - if dpp.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && dpp.Response.Response.StatusCode != http.StatusNoContent { - dpp, err = client.UpdateTagsResponder(dpp.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansUpdateTagsFuture", "Result", dpp.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/defaultsecurityrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/defaultsecurityrules.go index 74516020f0b..ef81d81bc36 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/defaultsecurityrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/defaultsecurityrules.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/enums.go index c1c8487fbe6..f04c749e16f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/enums.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/enums.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutecircuitauthorizations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutecircuitauthorizations.go index 36e5e7ce4dc..affb8d962c6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutecircuitauthorizations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutecircuitauthorizations.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -113,30 +102,7 @@ func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdateSender(req * var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCircuitAuthorizationsClient) (erca ExpressRouteCircuitAuthorization, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - erca.Response.Response, err = future.GetResult(sender) - if erca.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && erca.Response.Response.StatusCode != http.StatusNoContent { - erca, err = client.CreateOrUpdateResponder(erca.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture", "Result", erca.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -216,20 +182,7 @@ func (client ExpressRouteCircuitAuthorizationsClient) DeleteSender(req *http.Req var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCircuitAuthorizationsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitAuthorizationsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutecircuitconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutecircuitconnections.go index 8c3c22d2d31..b093cf9b20c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutecircuitconnections.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutecircuitconnections.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -114,30 +103,7 @@ func (client ExpressRouteCircuitConnectionsClient) CreateOrUpdateSender(req *htt var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCircuitConnectionsClient) (ercc ExpressRouteCircuitConnection, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitConnectionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitConnectionsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ercc.Response.Response, err = future.GetResult(sender) - if ercc.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitConnectionsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && ercc.Response.Response.StatusCode != http.StatusNoContent { - ercc, err = client.CreateOrUpdateResponder(ercc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitConnectionsCreateOrUpdateFuture", "Result", ercc.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -219,20 +185,7 @@ func (client ExpressRouteCircuitConnectionsClient) DeleteSender(req *http.Reques var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCircuitConnectionsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitConnectionsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitConnectionsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutecircuitpeerings.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutecircuitpeerings.go index ffe8083fc8a..4ee745e1eae 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutecircuitpeerings.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutecircuitpeerings.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -123,30 +112,7 @@ func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdateSender(req *http.R var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCircuitPeeringsClient) (ercp ExpressRouteCircuitPeering, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitPeeringsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ercp.Response.Response, err = future.GetResult(sender) - if ercp.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && ercp.Response.Response.StatusCode != http.StatusNoContent { - ercp, err = client.CreateOrUpdateResponder(ercp.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsCreateOrUpdateFuture", "Result", ercp.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -226,20 +192,7 @@ func (client ExpressRouteCircuitPeeringsClient) DeleteSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCircuitPeeringsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitPeeringsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutecircuits.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutecircuits.go index ec944980698..495a44a5390 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutecircuits.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutecircuits.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. 
All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -108,30 +97,7 @@ func (client ExpressRouteCircuitsClient) CreateOrUpdateSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCircuitsClient) (erc ExpressRouteCircuit, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - erc.Response.Response, err = future.GetResult(sender) - if erc.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && erc.Response.Response.StatusCode != http.StatusNoContent { - erc, err = client.CreateOrUpdateResponder(erc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsCreateOrUpdateFuture", "Result", erc.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -209,20 +175,7 @@ func (client ExpressRouteCircuitsClient) DeleteSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCircuitsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -762,30 +715,7 @@ func (client ExpressRouteCircuitsClient) ListArpTableSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCircuitsClient) (ercatlr ExpressRouteCircuitsArpTableListResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, 
"network.ExpressRouteCircuitsListArpTableFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsListArpTableFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ercatlr.Response.Response, err = future.GetResult(sender) - if ercatlr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListArpTableFuture", "Result", nil, "received nil response and error") - } - if err == nil && ercatlr.Response.Response.StatusCode != http.StatusNoContent { - ercatlr, err = client.ListArpTableResponder(ercatlr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListArpTableFuture", "Result", ercatlr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -868,30 +798,7 @@ func (client ExpressRouteCircuitsClient) ListRoutesTableSender(req *http.Request var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCircuitsClient) (ercrtlr ExpressRouteCircuitsRoutesTableListResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListRoutesTableFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsListRoutesTableFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ercrtlr.Response.Response, err = future.GetResult(sender) - if ercrtlr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListRoutesTableFuture", "Result", nil, "received nil response and error") - } - if err == nil && ercrtlr.Response.Response.StatusCode != http.StatusNoContent { - ercrtlr, err = client.ListRoutesTableResponder(ercrtlr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListRoutesTableFuture", "Result", ercrtlr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -974,30 +881,7 @@ func (client ExpressRouteCircuitsClient) ListRoutesTableSummarySender(req *http. 
var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCircuitsClient) (ercrtslr ExpressRouteCircuitsRoutesTableSummaryListResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListRoutesTableSummaryFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsListRoutesTableSummaryFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ercrtslr.Response.Response, err = future.GetResult(sender) - if ercrtslr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListRoutesTableSummaryFuture", "Result", nil, "received nil response and error") - } - if err == nil && ercrtslr.Response.Response.StatusCode != http.StatusNoContent { - ercrtslr, err = client.ListRoutesTableSummaryResponder(ercrtslr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListRoutesTableSummaryFuture", "Result", ercrtslr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1078,30 +962,7 @@ func (client ExpressRouteCircuitsClient) UpdateTagsSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCircuitsClient) (erc ExpressRouteCircuit, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - erc.Response.Response, err = future.GetResult(sender) - if erc.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && erc.Response.Response.StatusCode != http.StatusNoContent { - erc, err = client.UpdateTagsResponder(erc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsUpdateTagsFuture", "Result", erc.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressrouteconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressrouteconnections.go index 670a7d543ed..dab6cef7dbb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressrouteconnections.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressrouteconnections.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -118,30 +107,7 @@ func (client ExpressRouteConnectionsClient) CreateOrUpdateSender(req *http.Reque var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteConnectionsClient) (erc ExpressRouteConnection, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteConnectionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteConnectionsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - erc.Response.Response, err = future.GetResult(sender) - if erc.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteConnectionsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && erc.Response.Response.StatusCode != http.StatusNoContent { - erc, err = client.CreateOrUpdateResponder(erc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteConnectionsCreateOrUpdateFuture", "Result", erc.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -221,20 +187,7 @@ func (client ExpressRouteConnectionsClient) DeleteSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteConnectionsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteConnectionsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteConnectionsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutecrossconnectionpeerings.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutecrossconnectionpeerings.go index b8286b7f2dc..24290f89237 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutecrossconnectionpeerings.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutecrossconnectionpeerings.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -124,30 +113,7 @@ func (client ExpressRouteCrossConnectionPeeringsClient) CreateOrUpdateSender(req var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCrossConnectionPeeringsClient) (erccp ExpressRouteCrossConnectionPeering, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - erccp.Response.Response, err = future.GetResult(sender) - if erccp.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && erccp.Response.Response.StatusCode != http.StatusNoContent { - erccp, err = client.CreateOrUpdateResponder(erccp.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture", "Result", erccp.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -227,20 +193,7 @@ func (client ExpressRouteCrossConnectionPeeringsClient) DeleteSender(req *http.R var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCrossConnectionPeeringsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionPeeringsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionPeeringsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutecrossconnections.go 
b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutecrossconnections.go index 83b0da14455..2fb654a3111 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutecrossconnections.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutecrossconnections.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -108,30 +97,7 @@ func (client ExpressRouteCrossConnectionsClient) CreateOrUpdateSender(req *http. var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCrossConnectionsClient) (ercc ExpressRouteCrossConnection, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ercc.Response.Response, err = future.GetResult(sender) - if ercc.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && ercc.Response.Response.StatusCode != http.StatusNoContent { - ercc, err = client.CreateOrUpdateResponder(ercc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsCreateOrUpdateFuture", "Result", ercc.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -403,30 +369,7 @@ func (client ExpressRouteCrossConnectionsClient) ListArpTableSender(req *http.Re var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCrossConnectionsClient) (ercatlr ExpressRouteCircuitsArpTableListResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListArpTableFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = 
azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsListArpTableFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ercatlr.Response.Response, err = future.GetResult(sender) - if ercatlr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListArpTableFuture", "Result", nil, "received nil response and error") - } - if err == nil && ercatlr.Response.Response.StatusCode != http.StatusNoContent { - ercatlr, err = client.ListArpTableResponder(ercatlr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListArpTableFuture", "Result", ercatlr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -625,30 +568,7 @@ func (client ExpressRouteCrossConnectionsClient) ListRoutesTableSender(req *http var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCrossConnectionsClient) (ercrtlr ExpressRouteCircuitsRoutesTableListResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListRoutesTableFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsListRoutesTableFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ercrtlr.Response.Response, err = future.GetResult(sender) - if ercrtlr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListRoutesTableFuture", "Result", nil, "received nil response and error") - } - if err == nil && ercrtlr.Response.Response.StatusCode != http.StatusNoContent { - ercrtlr, err = client.ListRoutesTableResponder(ercrtlr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListRoutesTableFuture", "Result", ercrtlr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -731,30 +651,7 @@ func (client ExpressRouteCrossConnectionsClient) ListRoutesTableSummarySender(re var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCrossConnectionsClient) (erccrtslr ExpressRouteCrossConnectionsRoutesTableSummaryListResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListRoutesTableSummaryFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsListRoutesTableSummaryFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - erccrtslr.Response.Response, err = future.GetResult(sender) - if erccrtslr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, 
"network.ExpressRouteCrossConnectionsListRoutesTableSummaryFuture", "Result", nil, "received nil response and error") - } - if err == nil && erccrtslr.Response.Response.StatusCode != http.StatusNoContent { - erccrtslr, err = client.ListRoutesTableSummaryResponder(erccrtslr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListRoutesTableSummaryFuture", "Result", erccrtslr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -835,30 +732,7 @@ func (client ExpressRouteCrossConnectionsClient) UpdateTagsSender(req *http.Requ var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteCrossConnectionsClient) (ercc ExpressRouteCrossConnection, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ercc.Response.Response, err = future.GetResult(sender) - if ercc.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && ercc.Response.Response.StatusCode != http.StatusNoContent { - ercc, err = client.UpdateTagsResponder(ercc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsUpdateTagsFuture", "Result", ercc.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutegateways.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutegateways.go index 15630c22e8e..c136a6f53d5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutegateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutegateways.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -116,30 +105,7 @@ func (client ExpressRouteGatewaysClient) CreateOrUpdateSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteGatewaysClient) (erg ExpressRouteGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteGatewaysCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - erg.Response.Response, err = future.GetResult(sender) - if erg.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && erg.Response.Response.StatusCode != http.StatusNoContent { - erg, err = client.CreateOrUpdateResponder(erg.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysCreateOrUpdateFuture", "Result", erg.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -218,20 +184,7 @@ func (client ExpressRouteGatewaysClient) DeleteSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRouteGatewaysClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRouteGatewaysDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutelinks.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutelinks.go index 9c3e8db073b..9f310216342 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutelinks.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressroutelinks.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. 
// Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressrouteports.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressrouteports.go index cc80aa1fcbc..2b25e5db683 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressrouteports.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressrouteports.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -108,30 +97,7 @@ func (client ExpressRoutePortsClient) CreateOrUpdateSender(req *http.Request) (f var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRoutePortsClient) (erp ExpressRoutePort, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRoutePortsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - erp.Response.Response, err = future.GetResult(sender) - if erp.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && erp.Response.Response.StatusCode != http.StatusNoContent { - erp, err = client.CreateOrUpdateResponder(erp.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsCreateOrUpdateFuture", "Result", erp.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -209,20 +175,7 @@ func (client ExpressRoutePortsClient) DeleteSender(req *http.Request) (future Ex var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRoutePortsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = 
azure.NewAsyncOpIncompleteError("network.ExpressRoutePortsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -607,30 +560,7 @@ func (client ExpressRoutePortsClient) UpdateTagsSender(req *http.Request) (futur var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExpressRoutePortsClient) (erp ExpressRoutePort, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ExpressRoutePortsUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - erp.Response.Response, err = future.GetResult(sender) - if erp.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && erp.Response.Response.StatusCode != http.StatusNoContent { - erp, err = client.UpdateTagsResponder(erp.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsUpdateTagsFuture", "Result", erp.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressrouteportslocations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressrouteportslocations.go index f85da9b3546..07431ac6348 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressrouteportslocations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressrouteportslocations.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressrouteserviceproviders.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressrouteserviceproviders.go index 9dffe3321ae..4b766abf2b2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressrouteserviceproviders.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/expressrouteserviceproviders.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/firewallpolicies.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/firewallpolicies.go index 625cab542a0..5fac890c6e7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/firewallpolicies.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/firewallpolicies.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -108,30 +97,7 @@ func (client FirewallPoliciesClient) CreateOrUpdateSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client FirewallPoliciesClient) (fp FirewallPolicy, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.FirewallPoliciesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.FirewallPoliciesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - fp.Response.Response, err = future.GetResult(sender) - if fp.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.FirewallPoliciesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && fp.Response.Response.StatusCode != http.StatusNoContent { - fp, err = client.CreateOrUpdateResponder(fp.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.FirewallPoliciesCreateOrUpdateFuture", "Result", fp.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -209,20 +175,7 @@ func (client FirewallPoliciesClient) DeleteSender(req *http.Request) (future Fir var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client FirewallPoliciesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.FirewallPoliciesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.FirewallPoliciesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/firewallpolicyrulegroups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/firewallpolicyrulegroups.go index 53649a95207..0b56699a9c6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/firewallpolicyrulegroups.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/firewallpolicyrulegroups.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. 
// Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -123,30 +112,7 @@ func (client FirewallPolicyRuleGroupsClient) CreateOrUpdateSender(req *http.Requ var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client FirewallPolicyRuleGroupsClient) (fprg FirewallPolicyRuleGroup, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.FirewallPolicyRuleGroupsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - fprg.Response.Response, err = future.GetResult(sender) - if fprg.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && fprg.Response.Response.StatusCode != http.StatusNoContent { - fprg, err = client.CreateOrUpdateResponder(fprg.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsCreateOrUpdateFuture", "Result", fprg.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -226,20 +192,7 @@ func (client FirewallPolicyRuleGroupsClient) DeleteSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client FirewallPolicyRuleGroupsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.FirewallPolicyRuleGroupsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/hubvirtualnetworkconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/hubvirtualnetworkconnections.go index 2110b40a108..2b0c0fd1cf0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/hubvirtualnetworkconnections.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/hubvirtualnetworkconnections.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/inboundnatrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/inboundnatrules.go index 0a7fd97c50c..4c6bf222233 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/inboundnatrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/inboundnatrules.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -129,30 +118,7 @@ func (client InboundNatRulesClient) CreateOrUpdateSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client InboundNatRulesClient) (inr InboundNatRule, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InboundNatRulesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.InboundNatRulesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - inr.Response.Response, err = future.GetResult(sender) - if inr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.InboundNatRulesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && inr.Response.Response.StatusCode != http.StatusNoContent { - inr, err = client.CreateOrUpdateResponder(inr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InboundNatRulesCreateOrUpdateFuture", "Result", inr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -232,20 +198,7 @@ func (client InboundNatRulesClient) DeleteSender(req *http.Request) (future Inbo var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client InboundNatRulesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, 
"network.InboundNatRulesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.InboundNatRulesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/interfaceipconfigurations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/interfaceipconfigurations.go index 75bdc3824f7..830cc18aa8a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/interfaceipconfigurations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/interfaceipconfigurations.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/interfaceloadbalancers.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/interfaceloadbalancers.go index 75e4ef2b0f8..26384ebaea3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/interfaceloadbalancers.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/interfaceloadbalancers.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/interfacesgroup.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/interfacesgroup.go index 88fe2e97893..5ce62ff58f9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/interfacesgroup.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/interfacesgroup.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -106,30 +95,7 @@ func (client InterfacesClient) CreateOrUpdateSender(req *http.Request) (future I var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client InterfacesClient) (i Interface, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfacesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.InterfacesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - i.Response.Response, err = future.GetResult(sender) - if i.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.InterfacesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && i.Response.Response.StatusCode != http.StatusNoContent { - i, err = client.CreateOrUpdateResponder(i.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfacesCreateOrUpdateFuture", "Result", i.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -207,20 +173,7 @@ func (client InterfacesClient) DeleteSender(req *http.Request) (future Interface var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client InterfacesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfacesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.InterfacesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -377,30 +330,7 @@ func (client 
InterfacesClient) GetEffectiveRouteTableSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client InterfacesClient) (erlr EffectiveRouteListResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfacesGetEffectiveRouteTableFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.InterfacesGetEffectiveRouteTableFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - erlr.Response.Response, err = future.GetResult(sender) - if erlr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.InterfacesGetEffectiveRouteTableFuture", "Result", nil, "received nil response and error") - } - if err == nil && erlr.Response.Response.StatusCode != http.StatusNoContent { - erlr, err = client.GetEffectiveRouteTableResponder(erlr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfacesGetEffectiveRouteTableFuture", "Result", erlr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -878,30 +808,7 @@ func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsSender(req *htt var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client InterfacesClient) (ensglr EffectiveNetworkSecurityGroupListResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfacesListEffectiveNetworkSecurityGroupsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.InterfacesListEffectiveNetworkSecurityGroupsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ensglr.Response.Response, err = future.GetResult(sender) - if ensglr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.InterfacesListEffectiveNetworkSecurityGroupsFuture", "Result", nil, "received nil response and error") - } - if err == nil && ensglr.Response.Response.StatusCode != http.StatusNoContent { - ensglr, err = client.ListEffectiveNetworkSecurityGroupsResponder(ensglr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfacesListEffectiveNetworkSecurityGroupsFuture", "Result", ensglr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1348,30 +1255,7 @@ func (client InterfacesClient) UpdateTagsSender(req *http.Request) (future Inter var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client InterfacesClient) (i Interface, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfacesUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = 
azure.NewAsyncOpIncompleteError("network.InterfacesUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - i.Response.Response, err = future.GetResult(sender) - if i.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.InterfacesUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && i.Response.Response.StatusCode != http.StatusNoContent { - i, err = client.UpdateTagsResponder(i.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfacesUpdateTagsFuture", "Result", i.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/interfacetapconfigurations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/interfacetapconfigurations.go index b08fe88fc6b..355b1c89fe1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/interfacetapconfigurations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/interfacetapconfigurations.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -146,30 +135,7 @@ func (client InterfaceTapConfigurationsClient) CreateOrUpdateSender(req *http.Re var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client InterfaceTapConfigurationsClient) (itc InterfaceTapConfiguration, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfaceTapConfigurationsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.InterfaceTapConfigurationsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - itc.Response.Response, err = future.GetResult(sender) - if itc.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.InterfaceTapConfigurationsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && itc.Response.Response.StatusCode != http.StatusNoContent { - itc, err = client.CreateOrUpdateResponder(itc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfaceTapConfigurationsCreateOrUpdateFuture", "Result", itc.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -249,20 +215,7 @@ func (client InterfaceTapConfigurationsClient) DeleteSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client InterfaceTapConfigurationsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfaceTapConfigurationsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.InterfaceTapConfigurationsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalancerbackendaddresspools.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalancerbackendaddresspools.go index 17a45735e0f..50bf79604a8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalancerbackendaddresspools.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalancerbackendaddresspools.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalancerfrontendipconfigurations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalancerfrontendipconfigurations.go index 3dbf40f169c..28398ae2ca6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalancerfrontendipconfigurations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalancerfrontendipconfigurations.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalancerloadbalancingrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalancerloadbalancingrules.go index 243c7ef7124..cb879046915 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalancerloadbalancingrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalancerloadbalancingrules.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalancernetworkinterfaces.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalancernetworkinterfaces.go index 560a5576917..be9b79ecc18 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalancernetworkinterfaces.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalancernetworkinterfaces.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalanceroutboundrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalanceroutboundrules.go index b09b5ed82fb..81cafbda1d0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalanceroutboundrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalanceroutboundrules.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalancerprobes.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalancerprobes.go index dedadceb6f6..c99bacb7b8c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalancerprobes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalancerprobes.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. 
All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalancers.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalancers.go index df0049aea6b..749610065dd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalancers.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/loadbalancers.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -106,30 +95,7 @@ func (client LoadBalancersClient) CreateOrUpdateSender(req *http.Request) (futur var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client LoadBalancersClient) (lb LoadBalancer, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.LoadBalancersCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.LoadBalancersCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - lb.Response.Response, err = future.GetResult(sender) - if lb.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.LoadBalancersCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && lb.Response.Response.StatusCode != http.StatusNoContent { - lb, err = client.CreateOrUpdateResponder(lb.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.LoadBalancersCreateOrUpdateFuture", "Result", lb.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -207,20 +173,7 @@ func (client LoadBalancersClient) DeleteSender(req *http.Request) (future LoadBa var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client LoadBalancersClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.LoadBalancersDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.LoadBalancersDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -609,30 +562,7 @@ func (client LoadBalancersClient) UpdateTagsSender(req *http.Request) (future Lo var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client LoadBalancersClient) (lb LoadBalancer, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.LoadBalancersUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.LoadBalancersUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - lb.Response.Response, err = future.GetResult(sender) - if lb.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.LoadBalancersUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && lb.Response.Response.StatusCode != http.StatusNoContent { - lb, err = client.UpdateTagsResponder(lb.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.LoadBalancersUpdateTagsFuture", "Result", lb.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/localnetworkgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/localnetworkgateways.go index 242eb2c02c9..03784773452 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/localnetworkgateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/localnetworkgateways.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -116,30 +105,7 @@ func (client LocalNetworkGatewaysClient) CreateOrUpdateSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client LocalNetworkGatewaysClient) (lng LocalNetworkGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.LocalNetworkGatewaysCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - lng.Response.Response, err = future.GetResult(sender) - if lng.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && lng.Response.Response.StatusCode != http.StatusNoContent { - lng, err = client.CreateOrUpdateResponder(lng.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysCreateOrUpdateFuture", "Result", lng.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -223,20 +189,7 @@ func (client LocalNetworkGatewaysClient) DeleteSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client LocalNetworkGatewaysClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.LocalNetworkGatewaysDeleteFuture") - return - } - 
ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -520,30 +473,7 @@ func (client LocalNetworkGatewaysClient) UpdateTagsSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client LocalNetworkGatewaysClient) (lng LocalNetworkGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.LocalNetworkGatewaysUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - lng.Response.Response, err = future.GetResult(sender) - if lng.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && lng.Response.Response.StatusCode != http.StatusNoContent { - lng, err = client.UpdateTagsResponder(lng.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysUpdateTagsFuture", "Result", lng.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/models.go index 0e916aa4e2d..3ff405be27f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/models.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -2403,6 +2392,39 @@ type ApplicationGatewaysBackendHealthFuture struct { Result func(ApplicationGatewaysClient) (ApplicationGatewayBackendHealth, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ApplicationGatewaysBackendHealthFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ApplicationGatewaysBackendHealthFuture.Result. 
+func (future *ApplicationGatewaysBackendHealthFuture) result(client ApplicationGatewaysClient) (agbh ApplicationGatewayBackendHealth, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysBackendHealthFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysBackendHealthFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if agbh.Response.Response, err = future.GetResult(sender); err == nil && agbh.Response.Response.StatusCode != http.StatusNoContent { + agbh, err = client.BackendHealthResponder(agbh.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysBackendHealthFuture", "Result", agbh.Response.Response, "Failure responding to request") + } + } + return +} + // ApplicationGatewaysBackendHealthOnDemandFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type ApplicationGatewaysBackendHealthOnDemandFuture struct { @@ -2412,6 +2434,39 @@ type ApplicationGatewaysBackendHealthOnDemandFuture struct { Result func(ApplicationGatewaysClient) (ApplicationGatewayBackendHealthOnDemand, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ApplicationGatewaysBackendHealthOnDemandFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ApplicationGatewaysBackendHealthOnDemandFuture.Result. +func (future *ApplicationGatewaysBackendHealthOnDemandFuture) result(client ApplicationGatewaysClient) (agbhod ApplicationGatewayBackendHealthOnDemand, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysBackendHealthOnDemandFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysBackendHealthOnDemandFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if agbhod.Response.Response, err = future.GetResult(sender); err == nil && agbhod.Response.Response.StatusCode != http.StatusNoContent { + agbhod, err = client.BackendHealthOnDemandResponder(agbhod.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysBackendHealthOnDemandFuture", "Result", agbhod.Response.Response, "Failure responding to request") + } + } + return +} + // ApplicationGatewaysCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ApplicationGatewaysCreateOrUpdateFuture struct { @@ -2421,6 +2476,39 @@ type ApplicationGatewaysCreateOrUpdateFuture struct { Result func(ApplicationGatewaysClient) (ApplicationGateway, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *ApplicationGatewaysCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ApplicationGatewaysCreateOrUpdateFuture.Result. +func (future *ApplicationGatewaysCreateOrUpdateFuture) result(client ApplicationGatewaysClient) (ag ApplicationGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ag.Response.Response, err = future.GetResult(sender); err == nil && ag.Response.Response.StatusCode != http.StatusNoContent { + ag, err = client.CreateOrUpdateResponder(ag.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysCreateOrUpdateFuture", "Result", ag.Response.Response, "Failure responding to request") + } + } + return +} + // ApplicationGatewaysDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ApplicationGatewaysDeleteFuture struct { @@ -2430,6 +2518,33 @@ type ApplicationGatewaysDeleteFuture struct { Result func(ApplicationGatewaysClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ApplicationGatewaysDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ApplicationGatewaysDeleteFuture.Result. +func (future *ApplicationGatewaysDeleteFuture) result(client ApplicationGatewaysClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ApplicationGatewaySku SKU of an application gateway. type ApplicationGatewaySku struct { // Name - Name of an application gateway SKU. Possible values include: 'StandardSmall', 'StandardMedium', 'StandardLarge', 'WAFMedium', 'WAFLarge', 'StandardV2', 'WAFV2' @@ -2650,6 +2765,33 @@ type ApplicationGatewaysStartFuture struct { Result func(ApplicationGatewaysClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ApplicationGatewaysStartFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ApplicationGatewaysStartFuture.Result. 
+func (future *ApplicationGatewaysStartFuture) result(client ApplicationGatewaysClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysStartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysStartFuture") + return + } + ar.Response = future.Response() + return +} + // ApplicationGatewaysStopFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type ApplicationGatewaysStopFuture struct { @@ -2659,6 +2801,33 @@ type ApplicationGatewaysStopFuture struct { Result func(ApplicationGatewaysClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ApplicationGatewaysStopFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ApplicationGatewaysStopFuture.Result. +func (future *ApplicationGatewaysStopFuture) result(client ApplicationGatewaysClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysStopFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysStopFuture") + return + } + ar.Response = future.Response() + return +} + // ApplicationGatewaysUpdateTagsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ApplicationGatewaysUpdateTagsFuture struct { @@ -2668,6 +2837,39 @@ type ApplicationGatewaysUpdateTagsFuture struct { Result func(ApplicationGatewaysClient) (ApplicationGateway, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ApplicationGatewaysUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ApplicationGatewaysUpdateTagsFuture.Result. 
+func (future *ApplicationGatewaysUpdateTagsFuture) result(client ApplicationGatewaysClient) (ag ApplicationGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ag.Response.Response, err = future.GetResult(sender); err == nil && ag.Response.Response.StatusCode != http.StatusNoContent { + ag, err = client.UpdateTagsResponder(ag.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysUpdateTagsFuture", "Result", ag.Response.Response, "Failure responding to request") + } + } + return +} + // ApplicationGatewayTrustedRootCertificate trusted Root certificates of an application gateway. type ApplicationGatewayTrustedRootCertificate struct { // ApplicationGatewayTrustedRootCertificatePropertiesFormat - Properties of the application gateway trusted root certificate. @@ -3283,6 +3485,39 @@ type ApplicationSecurityGroupsCreateOrUpdateFuture struct { Result func(ApplicationSecurityGroupsClient) (ApplicationSecurityGroup, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ApplicationSecurityGroupsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ApplicationSecurityGroupsCreateOrUpdateFuture.Result. +func (future *ApplicationSecurityGroupsCreateOrUpdateFuture) result(client ApplicationSecurityGroupsClient) (asg ApplicationSecurityGroup, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ApplicationSecurityGroupsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if asg.Response.Response, err = future.GetResult(sender); err == nil && asg.Response.Response.StatusCode != http.StatusNoContent { + asg, err = client.CreateOrUpdateResponder(asg.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsCreateOrUpdateFuture", "Result", asg.Response.Response, "Failure responding to request") + } + } + return +} + // ApplicationSecurityGroupsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ApplicationSecurityGroupsDeleteFuture struct { @@ -3292,6 +3527,33 @@ type ApplicationSecurityGroupsDeleteFuture struct { Result func(ApplicationSecurityGroupsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *ApplicationSecurityGroupsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ApplicationSecurityGroupsDeleteFuture.Result. +func (future *ApplicationSecurityGroupsDeleteFuture) result(client ApplicationSecurityGroupsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ApplicationSecurityGroupsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ApplicationSecurityGroupsUpdateTagsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ApplicationSecurityGroupsUpdateTagsFuture struct { @@ -3301,6 +3563,39 @@ type ApplicationSecurityGroupsUpdateTagsFuture struct { Result func(ApplicationSecurityGroupsClient) (ApplicationSecurityGroup, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ApplicationSecurityGroupsUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ApplicationSecurityGroupsUpdateTagsFuture.Result. +func (future *ApplicationSecurityGroupsUpdateTagsFuture) result(client ApplicationSecurityGroupsClient) (asg ApplicationSecurityGroup, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ApplicationSecurityGroupsUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if asg.Response.Response, err = future.GetResult(sender); err == nil && asg.Response.Response.StatusCode != http.StatusNoContent { + asg, err = client.UpdateTagsResponder(asg.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsUpdateTagsFuture", "Result", asg.Response.Response, "Failure responding to request") + } + } + return +} + // AuthorizationListResult response for ListAuthorizations API service call retrieves all authorizations // that belongs to an ExpressRouteCircuit. type AuthorizationListResult struct { @@ -5167,6 +5462,39 @@ type AzureFirewallsCreateOrUpdateFuture struct { Result func(AzureFirewallsClient) (AzureFirewall, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *AzureFirewallsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for AzureFirewallsCreateOrUpdateFuture.Result. 
+func (future *AzureFirewallsCreateOrUpdateFuture) result(client AzureFirewallsClient) (af AzureFirewall, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.AzureFirewallsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.AzureFirewallsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if af.Response.Response, err = future.GetResult(sender); err == nil && af.Response.Response.StatusCode != http.StatusNoContent { + af, err = client.CreateOrUpdateResponder(af.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.AzureFirewallsCreateOrUpdateFuture", "Result", af.Response.Response, "Failure responding to request") + } + } + return +} + // AzureFirewallsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type AzureFirewallsDeleteFuture struct { @@ -5176,6 +5504,33 @@ type AzureFirewallsDeleteFuture struct { Result func(AzureFirewallsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *AzureFirewallsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for AzureFirewallsDeleteFuture.Result. +func (future *AzureFirewallsDeleteFuture) result(client AzureFirewallsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.AzureFirewallsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.AzureFirewallsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // AzureReachabilityReport azure reachability report details. type AzureReachabilityReport struct { autorest.Response `json:"-"` @@ -5739,6 +6094,39 @@ type BastionHostsCreateOrUpdateFuture struct { Result func(BastionHostsClient) (BastionHost, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *BastionHostsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for BastionHostsCreateOrUpdateFuture.Result. 
+func (future *BastionHostsCreateOrUpdateFuture) result(client BastionHostsClient) (bh BastionHost, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.BastionHostsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.BastionHostsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if bh.Response.Response, err = future.GetResult(sender); err == nil && bh.Response.Response.StatusCode != http.StatusNoContent { + bh, err = client.CreateOrUpdateResponder(bh.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.BastionHostsCreateOrUpdateFuture", "Result", bh.Response.Response, "Failure responding to request") + } + } + return +} + // BastionHostsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type BastionHostsDeleteFuture struct { @@ -5748,6 +6136,33 @@ type BastionHostsDeleteFuture struct { Result func(BastionHostsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *BastionHostsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for BastionHostsDeleteFuture.Result. +func (future *BastionHostsDeleteFuture) result(client BastionHostsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.BastionHostsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.BastionHostsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // BGPCommunity contains bgp community information offered in Service Community resources. type BGPCommunity struct { // ServiceSupportedRegion - The region which the service support. e.g. For O365, region is Global. @@ -6384,6 +6799,39 @@ type ConnectionMonitorsCreateOrUpdateFuture struct { Result func(ConnectionMonitorsClient) (ConnectionMonitorResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ConnectionMonitorsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ConnectionMonitorsCreateOrUpdateFuture.Result. 
+func (future *ConnectionMonitorsCreateOrUpdateFuture) result(client ConnectionMonitorsClient) (cmr ConnectionMonitorResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ConnectionMonitorsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if cmr.Response.Response, err = future.GetResult(sender); err == nil && cmr.Response.Response.StatusCode != http.StatusNoContent { + cmr, err = client.CreateOrUpdateResponder(cmr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsCreateOrUpdateFuture", "Result", cmr.Response.Response, "Failure responding to request") + } + } + return +} + // ConnectionMonitorsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ConnectionMonitorsDeleteFuture struct { @@ -6393,6 +6841,33 @@ type ConnectionMonitorsDeleteFuture struct { Result func(ConnectionMonitorsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ConnectionMonitorsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ConnectionMonitorsDeleteFuture.Result. +func (future *ConnectionMonitorsDeleteFuture) result(client ConnectionMonitorsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ConnectionMonitorsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ConnectionMonitorSource describes the source of connection monitor. type ConnectionMonitorSource struct { // ResourceID - The ID of the resource used as the source by connection monitor. @@ -6410,6 +6885,39 @@ type ConnectionMonitorsQueryFuture struct { Result func(ConnectionMonitorsClient) (ConnectionMonitorQueryResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ConnectionMonitorsQueryFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ConnectionMonitorsQueryFuture.Result. 
+func (future *ConnectionMonitorsQueryFuture) result(client ConnectionMonitorsClient) (cmqr ConnectionMonitorQueryResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsQueryFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ConnectionMonitorsQueryFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if cmqr.Response.Response, err = future.GetResult(sender); err == nil && cmqr.Response.Response.StatusCode != http.StatusNoContent { + cmqr, err = client.QueryResponder(cmqr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsQueryFuture", "Result", cmqr.Response.Response, "Failure responding to request") + } + } + return +} + // ConnectionMonitorsStartFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type ConnectionMonitorsStartFuture struct { @@ -6419,6 +6927,33 @@ type ConnectionMonitorsStartFuture struct { Result func(ConnectionMonitorsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ConnectionMonitorsStartFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ConnectionMonitorsStartFuture.Result. +func (future *ConnectionMonitorsStartFuture) result(client ConnectionMonitorsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsStartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ConnectionMonitorsStartFuture") + return + } + ar.Response = future.Response() + return +} + // ConnectionMonitorsStopFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type ConnectionMonitorsStopFuture struct { @@ -6428,6 +6963,33 @@ type ConnectionMonitorsStopFuture struct { Result func(ConnectionMonitorsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ConnectionMonitorsStopFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ConnectionMonitorsStopFuture.Result. +func (future *ConnectionMonitorsStopFuture) result(client ConnectionMonitorsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ConnectionMonitorsStopFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ConnectionMonitorsStopFuture") + return + } + ar.Response = future.Response() + return +} + // ConnectionResetSharedKey the virtual network connection reset shared key. 
type ConnectionResetSharedKey struct { autorest.Response `json:"-"` @@ -6912,6 +7474,39 @@ type DdosCustomPoliciesCreateOrUpdateFuture struct { Result func(DdosCustomPoliciesClient) (DdosCustomPolicy, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DdosCustomPoliciesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DdosCustomPoliciesCreateOrUpdateFuture.Result. +func (future *DdosCustomPoliciesCreateOrUpdateFuture) result(client DdosCustomPoliciesClient) (dcp DdosCustomPolicy, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.DdosCustomPoliciesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.DdosCustomPoliciesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if dcp.Response.Response, err = future.GetResult(sender); err == nil && dcp.Response.Response.StatusCode != http.StatusNoContent { + dcp, err = client.CreateOrUpdateResponder(dcp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.DdosCustomPoliciesCreateOrUpdateFuture", "Result", dcp.Response.Response, "Failure responding to request") + } + } + return +} + // DdosCustomPoliciesDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type DdosCustomPoliciesDeleteFuture struct { @@ -6921,6 +7516,33 @@ type DdosCustomPoliciesDeleteFuture struct { Result func(DdosCustomPoliciesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DdosCustomPoliciesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DdosCustomPoliciesDeleteFuture.Result. +func (future *DdosCustomPoliciesDeleteFuture) result(client DdosCustomPoliciesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.DdosCustomPoliciesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.DdosCustomPoliciesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // DdosCustomPoliciesUpdateTagsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type DdosCustomPoliciesUpdateTagsFuture struct { @@ -6930,6 +7552,39 @@ type DdosCustomPoliciesUpdateTagsFuture struct { Result func(DdosCustomPoliciesClient) (DdosCustomPolicy, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *DdosCustomPoliciesUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DdosCustomPoliciesUpdateTagsFuture.Result. +func (future *DdosCustomPoliciesUpdateTagsFuture) result(client DdosCustomPoliciesClient) (dcp DdosCustomPolicy, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.DdosCustomPoliciesUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.DdosCustomPoliciesUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if dcp.Response.Response, err = future.GetResult(sender); err == nil && dcp.Response.Response.StatusCode != http.StatusNoContent { + dcp, err = client.UpdateTagsResponder(dcp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.DdosCustomPoliciesUpdateTagsFuture", "Result", dcp.Response.Response, "Failure responding to request") + } + } + return +} + // DdosCustomPolicy a DDoS custom policy in a resource group. type DdosCustomPolicy struct { autorest.Response `json:"-"` @@ -7365,6 +8020,39 @@ type DdosProtectionPlansCreateOrUpdateFuture struct { Result func(DdosProtectionPlansClient) (DdosProtectionPlan, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DdosProtectionPlansCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DdosProtectionPlansCreateOrUpdateFuture.Result. +func (future *DdosProtectionPlansCreateOrUpdateFuture) result(client DdosProtectionPlansClient) (dpp DdosProtectionPlan, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.DdosProtectionPlansCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if dpp.Response.Response, err = future.GetResult(sender); err == nil && dpp.Response.Response.StatusCode != http.StatusNoContent { + dpp, err = client.CreateOrUpdateResponder(dpp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansCreateOrUpdateFuture", "Result", dpp.Response.Response, "Failure responding to request") + } + } + return +} + // DdosProtectionPlansDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type DdosProtectionPlansDeleteFuture struct { @@ -7374,6 +8062,33 @@ type DdosProtectionPlansDeleteFuture struct { Result func(DdosProtectionPlansClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *DdosProtectionPlansDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DdosProtectionPlansDeleteFuture.Result. +func (future *DdosProtectionPlansDeleteFuture) result(client DdosProtectionPlansClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.DdosProtectionPlansDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // DdosProtectionPlansUpdateTagsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type DdosProtectionPlansUpdateTagsFuture struct { @@ -7383,6 +8098,39 @@ type DdosProtectionPlansUpdateTagsFuture struct { Result func(DdosProtectionPlansClient) (DdosProtectionPlan, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DdosProtectionPlansUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DdosProtectionPlansUpdateTagsFuture.Result. +func (future *DdosProtectionPlansUpdateTagsFuture) result(client DdosProtectionPlansClient) (dpp DdosProtectionPlan, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.DdosProtectionPlansUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if dpp.Response.Response, err = future.GetResult(sender); err == nil && dpp.Response.Response.StatusCode != http.StatusNoContent { + dpp, err = client.UpdateTagsResponder(dpp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.DdosProtectionPlansUpdateTagsFuture", "Result", dpp.Response.Response, "Failure responding to request") + } + } + return +} + // DdosSettings contains the DDoS protection settings of the public IP. type DdosSettings struct { // DdosCustomPolicy - The DDoS custom policy associated with the public IP. @@ -8109,6 +8857,39 @@ type ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture struct { Result func(ExpressRouteCircuitAuthorizationsClient) (ExpressRouteCircuitAuthorization, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture.Result. 
+func (future *ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture) result(client ExpressRouteCircuitAuthorizationsClient) (erca ExpressRouteCircuitAuthorization, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if erca.Response.Response, err = future.GetResult(sender); err == nil && erca.Response.Response.StatusCode != http.StatusNoContent { + erca, err = client.CreateOrUpdateResponder(erca.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture", "Result", erca.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteCircuitAuthorizationsDeleteFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type ExpressRouteCircuitAuthorizationsDeleteFuture struct { @@ -8118,6 +8899,33 @@ type ExpressRouteCircuitAuthorizationsDeleteFuture struct { Result func(ExpressRouteCircuitAuthorizationsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCircuitAuthorizationsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCircuitAuthorizationsDeleteFuture.Result. +func (future *ExpressRouteCircuitAuthorizationsDeleteFuture) result(client ExpressRouteCircuitAuthorizationsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitAuthorizationsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ExpressRouteCircuitConnection express Route Circuit Connection in an ExpressRouteCircuitPeering // resource. type ExpressRouteCircuitConnection struct { @@ -8416,6 +9224,39 @@ type ExpressRouteCircuitConnectionsCreateOrUpdateFuture struct { Result func(ExpressRouteCircuitConnectionsClient) (ExpressRouteCircuitConnection, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCircuitConnectionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCircuitConnectionsCreateOrUpdateFuture.Result. 
+func (future *ExpressRouteCircuitConnectionsCreateOrUpdateFuture) result(client ExpressRouteCircuitConnectionsClient) (ercc ExpressRouteCircuitConnection, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitConnectionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitConnectionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ercc.Response.Response, err = future.GetResult(sender); err == nil && ercc.Response.Response.StatusCode != http.StatusNoContent { + ercc, err = client.CreateOrUpdateResponder(ercc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitConnectionsCreateOrUpdateFuture", "Result", ercc.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteCircuitConnectionsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ExpressRouteCircuitConnectionsDeleteFuture struct { @@ -8425,6 +9266,33 @@ type ExpressRouteCircuitConnectionsDeleteFuture struct { Result func(ExpressRouteCircuitConnectionsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCircuitConnectionsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCircuitConnectionsDeleteFuture.Result. +func (future *ExpressRouteCircuitConnectionsDeleteFuture) result(client ExpressRouteCircuitConnectionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitConnectionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitConnectionsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ExpressRouteCircuitListResult response for ListExpressRouteCircuit API service call. type ExpressRouteCircuitListResult struct { autorest.Response `json:"-"` @@ -8974,6 +9842,39 @@ type ExpressRouteCircuitPeeringsCreateOrUpdateFuture struct { Result func(ExpressRouteCircuitPeeringsClient) (ExpressRouteCircuitPeering, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCircuitPeeringsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCircuitPeeringsCreateOrUpdateFuture.Result. 
+func (future *ExpressRouteCircuitPeeringsCreateOrUpdateFuture) result(client ExpressRouteCircuitPeeringsClient) (ercp ExpressRouteCircuitPeering, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitPeeringsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ercp.Response.Response, err = future.GetResult(sender); err == nil && ercp.Response.Response.StatusCode != http.StatusNoContent { + ercp, err = client.CreateOrUpdateResponder(ercp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsCreateOrUpdateFuture", "Result", ercp.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteCircuitPeeringsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ExpressRouteCircuitPeeringsDeleteFuture struct { @@ -8983,6 +9884,33 @@ type ExpressRouteCircuitPeeringsDeleteFuture struct { Result func(ExpressRouteCircuitPeeringsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCircuitPeeringsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCircuitPeeringsDeleteFuture.Result. +func (future *ExpressRouteCircuitPeeringsDeleteFuture) result(client ExpressRouteCircuitPeeringsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitPeeringsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ExpressRouteCircuitPropertiesFormat properties of ExpressRouteCircuit. type ExpressRouteCircuitPropertiesFormat struct { // AllowClassicOperations - Allow classic operations. @@ -9113,6 +10041,39 @@ type ExpressRouteCircuitsCreateOrUpdateFuture struct { Result func(ExpressRouteCircuitsClient) (ExpressRouteCircuit, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCircuitsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCircuitsCreateOrUpdateFuture.Result. 
+func (future *ExpressRouteCircuitsCreateOrUpdateFuture) result(client ExpressRouteCircuitsClient) (erc ExpressRouteCircuit, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if erc.Response.Response, err = future.GetResult(sender); err == nil && erc.Response.Response.StatusCode != http.StatusNoContent { + erc, err = client.CreateOrUpdateResponder(erc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsCreateOrUpdateFuture", "Result", erc.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteCircuitsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ExpressRouteCircuitsDeleteFuture struct { @@ -9122,6 +10083,33 @@ type ExpressRouteCircuitsDeleteFuture struct { Result func(ExpressRouteCircuitsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCircuitsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCircuitsDeleteFuture.Result. +func (future *ExpressRouteCircuitsDeleteFuture) result(client ExpressRouteCircuitsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ExpressRouteCircuitServiceProviderProperties contains ServiceProviderProperties in an // ExpressRouteCircuit. type ExpressRouteCircuitServiceProviderProperties struct { @@ -9152,6 +10140,39 @@ type ExpressRouteCircuitsListArpTableFuture struct { Result func(ExpressRouteCircuitsClient) (ExpressRouteCircuitsArpTableListResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCircuitsListArpTableFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCircuitsListArpTableFuture.Result. 
+func (future *ExpressRouteCircuitsListArpTableFuture) result(client ExpressRouteCircuitsClient) (ercatlr ExpressRouteCircuitsArpTableListResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListArpTableFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsListArpTableFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ercatlr.Response.Response, err = future.GetResult(sender); err == nil && ercatlr.Response.Response.StatusCode != http.StatusNoContent { + ercatlr, err = client.ListArpTableResponder(ercatlr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListArpTableFuture", "Result", ercatlr.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteCircuitsListRoutesTableFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ExpressRouteCircuitsListRoutesTableFuture struct { @@ -9161,6 +10182,39 @@ type ExpressRouteCircuitsListRoutesTableFuture struct { Result func(ExpressRouteCircuitsClient) (ExpressRouteCircuitsRoutesTableListResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCircuitsListRoutesTableFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCircuitsListRoutesTableFuture.Result. +func (future *ExpressRouteCircuitsListRoutesTableFuture) result(client ExpressRouteCircuitsClient) (ercrtlr ExpressRouteCircuitsRoutesTableListResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListRoutesTableFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsListRoutesTableFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ercrtlr.Response.Response, err = future.GetResult(sender); err == nil && ercrtlr.Response.Response.StatusCode != http.StatusNoContent { + ercrtlr, err = client.ListRoutesTableResponder(ercrtlr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListRoutesTableFuture", "Result", ercrtlr.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteCircuitsListRoutesTableSummaryFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type ExpressRouteCircuitsListRoutesTableSummaryFuture struct { @@ -9170,6 +10224,39 @@ type ExpressRouteCircuitsListRoutesTableSummaryFuture struct { Result func(ExpressRouteCircuitsClient) (ExpressRouteCircuitsRoutesTableSummaryListResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *ExpressRouteCircuitsListRoutesTableSummaryFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCircuitsListRoutesTableSummaryFuture.Result. +func (future *ExpressRouteCircuitsListRoutesTableSummaryFuture) result(client ExpressRouteCircuitsClient) (ercrtslr ExpressRouteCircuitsRoutesTableSummaryListResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListRoutesTableSummaryFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsListRoutesTableSummaryFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ercrtslr.Response.Response, err = future.GetResult(sender); err == nil && ercrtslr.Response.Response.StatusCode != http.StatusNoContent { + ercrtslr, err = client.ListRoutesTableSummaryResponder(ercrtslr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsListRoutesTableSummaryFuture", "Result", ercrtslr.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteCircuitsRoutesTableListResult response for ListRoutesTable associated with the Express Route // Circuits API. type ExpressRouteCircuitsRoutesTableListResult struct { @@ -9212,6 +10299,39 @@ type ExpressRouteCircuitsUpdateTagsFuture struct { Result func(ExpressRouteCircuitsClient) (ExpressRouteCircuit, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCircuitsUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCircuitsUpdateTagsFuture.Result. +func (future *ExpressRouteCircuitsUpdateTagsFuture) result(client ExpressRouteCircuitsClient) (erc ExpressRouteCircuit, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if erc.Response.Response, err = future.GetResult(sender); err == nil && erc.Response.Response.StatusCode != http.StatusNoContent { + erc, err = client.UpdateTagsResponder(erc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsUpdateTagsFuture", "Result", erc.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteConnection expressRouteConnection resource. 
type ExpressRouteConnection struct { autorest.Response `json:"-"` @@ -9314,6 +10434,39 @@ type ExpressRouteConnectionsCreateOrUpdateFuture struct { Result func(ExpressRouteConnectionsClient) (ExpressRouteConnection, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteConnectionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteConnectionsCreateOrUpdateFuture.Result. +func (future *ExpressRouteConnectionsCreateOrUpdateFuture) result(client ExpressRouteConnectionsClient) (erc ExpressRouteConnection, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteConnectionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteConnectionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if erc.Response.Response, err = future.GetResult(sender); err == nil && erc.Response.Response.StatusCode != http.StatusNoContent { + erc, err = client.CreateOrUpdateResponder(erc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteConnectionsCreateOrUpdateFuture", "Result", erc.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteConnectionsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ExpressRouteConnectionsDeleteFuture struct { @@ -9323,6 +10476,33 @@ type ExpressRouteConnectionsDeleteFuture struct { Result func(ExpressRouteConnectionsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteConnectionsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteConnectionsDeleteFuture.Result. +func (future *ExpressRouteConnectionsDeleteFuture) result(client ExpressRouteConnectionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteConnectionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteConnectionsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ExpressRouteCrossConnection expressRouteCrossConnection resource. type ExpressRouteCrossConnection struct { autorest.Response `json:"-"` @@ -9938,6 +11118,39 @@ type ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture struct { Result func(ExpressRouteCrossConnectionPeeringsClient) (ExpressRouteCrossConnectionPeering, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture.Result. +func (future *ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture) result(client ExpressRouteCrossConnectionPeeringsClient) (erccp ExpressRouteCrossConnectionPeering, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if erccp.Response.Response, err = future.GetResult(sender); err == nil && erccp.Response.Response.StatusCode != http.StatusNoContent { + erccp, err = client.CreateOrUpdateResponder(erccp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionPeeringsCreateOrUpdateFuture", "Result", erccp.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteCrossConnectionPeeringsDeleteFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type ExpressRouteCrossConnectionPeeringsDeleteFuture struct { @@ -9947,6 +11160,33 @@ type ExpressRouteCrossConnectionPeeringsDeleteFuture struct { Result func(ExpressRouteCrossConnectionPeeringsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCrossConnectionPeeringsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCrossConnectionPeeringsDeleteFuture.Result. +func (future *ExpressRouteCrossConnectionPeeringsDeleteFuture) result(client ExpressRouteCrossConnectionPeeringsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionPeeringsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionPeeringsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ExpressRouteCrossConnectionProperties properties of ExpressRouteCrossConnection. type ExpressRouteCrossConnectionProperties struct { // PrimaryAzurePort - READ-ONLY; The name of the primary port. @@ -10016,6 +11256,39 @@ type ExpressRouteCrossConnectionsCreateOrUpdateFuture struct { Result func(ExpressRouteCrossConnectionsClient) (ExpressRouteCrossConnection, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *ExpressRouteCrossConnectionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCrossConnectionsCreateOrUpdateFuture.Result. +func (future *ExpressRouteCrossConnectionsCreateOrUpdateFuture) result(client ExpressRouteCrossConnectionsClient) (ercc ExpressRouteCrossConnection, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ercc.Response.Response, err = future.GetResult(sender); err == nil && ercc.Response.Response.StatusCode != http.StatusNoContent { + ercc, err = client.CreateOrUpdateResponder(ercc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsCreateOrUpdateFuture", "Result", ercc.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteCrossConnectionsListArpTableFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type ExpressRouteCrossConnectionsListArpTableFuture struct { @@ -10025,6 +11298,39 @@ type ExpressRouteCrossConnectionsListArpTableFuture struct { Result func(ExpressRouteCrossConnectionsClient) (ExpressRouteCircuitsArpTableListResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCrossConnectionsListArpTableFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCrossConnectionsListArpTableFuture.Result. 
+func (future *ExpressRouteCrossConnectionsListArpTableFuture) result(client ExpressRouteCrossConnectionsClient) (ercatlr ExpressRouteCircuitsArpTableListResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListArpTableFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsListArpTableFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ercatlr.Response.Response, err = future.GetResult(sender); err == nil && ercatlr.Response.Response.StatusCode != http.StatusNoContent { + ercatlr, err = client.ListArpTableResponder(ercatlr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListArpTableFuture", "Result", ercatlr.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteCrossConnectionsListRoutesTableFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type ExpressRouteCrossConnectionsListRoutesTableFuture struct { @@ -10034,6 +11340,39 @@ type ExpressRouteCrossConnectionsListRoutesTableFuture struct { Result func(ExpressRouteCrossConnectionsClient) (ExpressRouteCircuitsRoutesTableListResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCrossConnectionsListRoutesTableFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCrossConnectionsListRoutesTableFuture.Result. +func (future *ExpressRouteCrossConnectionsListRoutesTableFuture) result(client ExpressRouteCrossConnectionsClient) (ercrtlr ExpressRouteCircuitsRoutesTableListResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListRoutesTableFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsListRoutesTableFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ercrtlr.Response.Response, err = future.GetResult(sender); err == nil && ercrtlr.Response.Response.StatusCode != http.StatusNoContent { + ercrtlr, err = client.ListRoutesTableResponder(ercrtlr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListRoutesTableFuture", "Result", ercrtlr.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteCrossConnectionsListRoutesTableSummaryFuture an abstraction for monitoring and retrieving // the results of a long-running operation. 
type ExpressRouteCrossConnectionsListRoutesTableSummaryFuture struct { @@ -10043,6 +11382,39 @@ type ExpressRouteCrossConnectionsListRoutesTableSummaryFuture struct { Result func(ExpressRouteCrossConnectionsClient) (ExpressRouteCrossConnectionsRoutesTableSummaryListResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCrossConnectionsListRoutesTableSummaryFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCrossConnectionsListRoutesTableSummaryFuture.Result. +func (future *ExpressRouteCrossConnectionsListRoutesTableSummaryFuture) result(client ExpressRouteCrossConnectionsClient) (erccrtslr ExpressRouteCrossConnectionsRoutesTableSummaryListResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListRoutesTableSummaryFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsListRoutesTableSummaryFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if erccrtslr.Response.Response, err = future.GetResult(sender); err == nil && erccrtslr.Response.Response.StatusCode != http.StatusNoContent { + erccrtslr, err = client.ListRoutesTableSummaryResponder(erccrtslr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListRoutesTableSummaryFuture", "Result", erccrtslr.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteCrossConnectionsRoutesTableSummaryListResult response for ListRoutesTable associated with // the Express Route Cross Connections. type ExpressRouteCrossConnectionsRoutesTableSummaryListResult struct { @@ -10071,6 +11443,39 @@ type ExpressRouteCrossConnectionsUpdateTagsFuture struct { Result func(ExpressRouteCrossConnectionsClient) (ExpressRouteCrossConnection, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteCrossConnectionsUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteCrossConnectionsUpdateTagsFuture.Result. 
+func (future *ExpressRouteCrossConnectionsUpdateTagsFuture) result(client ExpressRouteCrossConnectionsClient) (ercc ExpressRouteCrossConnection, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ercc.Response.Response, err = future.GetResult(sender); err == nil && ercc.Response.Response.StatusCode != http.StatusNoContent { + ercc, err = client.UpdateTagsResponder(ercc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsUpdateTagsFuture", "Result", ercc.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteGateway expressRoute gateway resource. type ExpressRouteGateway struct { autorest.Response `json:"-"` @@ -10244,6 +11649,39 @@ type ExpressRouteGatewaysCreateOrUpdateFuture struct { Result func(ExpressRouteGatewaysClient) (ExpressRouteGateway, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRouteGatewaysCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteGatewaysCreateOrUpdateFuture.Result. +func (future *ExpressRouteGatewaysCreateOrUpdateFuture) result(client ExpressRouteGatewaysClient) (erg ExpressRouteGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteGatewaysCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if erg.Response.Response, err = future.GetResult(sender); err == nil && erg.Response.Response.StatusCode != http.StatusNoContent { + erg, err = client.CreateOrUpdateResponder(erg.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysCreateOrUpdateFuture", "Result", erg.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteGatewaysDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ExpressRouteGatewaysDeleteFuture struct { @@ -10253,6 +11691,33 @@ type ExpressRouteGatewaysDeleteFuture struct { Result func(ExpressRouteGatewaysClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *ExpressRouteGatewaysDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRouteGatewaysDeleteFuture.Result. +func (future *ExpressRouteGatewaysDeleteFuture) result(client ExpressRouteGatewaysClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteGatewaysDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ExpressRouteLink expressRouteLink child resource definition. type ExpressRouteLink struct { autorest.Response `json:"-"` @@ -10877,6 +12342,39 @@ type ExpressRoutePortsCreateOrUpdateFuture struct { Result func(ExpressRoutePortsClient) (ExpressRoutePort, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRoutePortsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRoutePortsCreateOrUpdateFuture.Result. +func (future *ExpressRoutePortsCreateOrUpdateFuture) result(client ExpressRoutePortsClient) (erp ExpressRoutePort, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRoutePortsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if erp.Response.Response, err = future.GetResult(sender); err == nil && erp.Response.Response.StatusCode != http.StatusNoContent { + erp, err = client.CreateOrUpdateResponder(erp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsCreateOrUpdateFuture", "Result", erp.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRoutePortsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type ExpressRoutePortsDeleteFuture struct { @@ -10886,6 +12384,33 @@ type ExpressRoutePortsDeleteFuture struct { Result func(ExpressRoutePortsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRoutePortsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRoutePortsDeleteFuture.Result. 
+func (future *ExpressRoutePortsDeleteFuture) result(client ExpressRoutePortsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRoutePortsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ExpressRoutePortsLocation definition of the ExpressRoutePorts peering location resource. type ExpressRoutePortsLocation struct { autorest.Response `json:"-"` @@ -11189,6 +12714,39 @@ type ExpressRoutePortsUpdateTagsFuture struct { Result func(ExpressRoutePortsClient) (ExpressRoutePort, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExpressRoutePortsUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExpressRoutePortsUpdateTagsFuture.Result. +func (future *ExpressRoutePortsUpdateTagsFuture) result(client ExpressRoutePortsClient) (erp ExpressRoutePort, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRoutePortsUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if erp.Response.Response, err = future.GetResult(sender); err == nil && erp.Response.Response.StatusCode != http.StatusNoContent { + erp, err = client.UpdateTagsResponder(erp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRoutePortsUpdateTagsFuture", "Result", erp.Response.Response, "Failure responding to request") + } + } + return +} + // ExpressRouteServiceProvider a ExpressRouteResourceProvider object. type ExpressRouteServiceProvider struct { // ExpressRouteServiceProviderPropertiesFormat - Properties of the express route service provider. @@ -11480,6 +13038,39 @@ type FirewallPoliciesCreateOrUpdateFuture struct { Result func(FirewallPoliciesClient) (FirewallPolicy, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *FirewallPoliciesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for FirewallPoliciesCreateOrUpdateFuture.Result. 
+func (future *FirewallPoliciesCreateOrUpdateFuture) result(client FirewallPoliciesClient) (fp FirewallPolicy, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPoliciesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.FirewallPoliciesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if fp.Response.Response, err = future.GetResult(sender); err == nil && fp.Response.Response.StatusCode != http.StatusNoContent { + fp, err = client.CreateOrUpdateResponder(fp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPoliciesCreateOrUpdateFuture", "Result", fp.Response.Response, "Failure responding to request") + } + } + return +} + // FirewallPoliciesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type FirewallPoliciesDeleteFuture struct { @@ -11489,6 +13080,33 @@ type FirewallPoliciesDeleteFuture struct { Result func(FirewallPoliciesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *FirewallPoliciesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for FirewallPoliciesDeleteFuture.Result. +func (future *FirewallPoliciesDeleteFuture) result(client FirewallPoliciesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPoliciesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.FirewallPoliciesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // FirewallPolicy firewallPolicy Resource. type FirewallPolicy struct { autorest.Response `json:"-"` @@ -12568,6 +14186,39 @@ type FirewallPolicyRuleGroupsCreateOrUpdateFuture struct { Result func(FirewallPolicyRuleGroupsClient) (FirewallPolicyRuleGroup, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *FirewallPolicyRuleGroupsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for FirewallPolicyRuleGroupsCreateOrUpdateFuture.Result. 
+func (future *FirewallPolicyRuleGroupsCreateOrUpdateFuture) result(client FirewallPolicyRuleGroupsClient) (fprg FirewallPolicyRuleGroup, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.FirewallPolicyRuleGroupsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if fprg.Response.Response, err = future.GetResult(sender); err == nil && fprg.Response.Response.StatusCode != http.StatusNoContent { + fprg, err = client.CreateOrUpdateResponder(fprg.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsCreateOrUpdateFuture", "Result", fprg.Response.Response, "Failure responding to request") + } + } + return +} + // FirewallPolicyRuleGroupsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type FirewallPolicyRuleGroupsDeleteFuture struct { @@ -12577,6 +14228,33 @@ type FirewallPolicyRuleGroupsDeleteFuture struct { Result func(FirewallPolicyRuleGroupsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *FirewallPolicyRuleGroupsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for FirewallPolicyRuleGroupsDeleteFuture.Result. +func (future *FirewallPolicyRuleGroupsDeleteFuture) result(client FirewallPolicyRuleGroupsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.FirewallPolicyRuleGroupsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // FlowLogFormatParameters parameters that define the flow log format. type FlowLogFormatParameters struct { // Type - The file type of flow log. Possible values include: 'JSON' @@ -13420,6 +15098,39 @@ type InboundNatRulesCreateOrUpdateFuture struct { Result func(InboundNatRulesClient) (InboundNatRule, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *InboundNatRulesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for InboundNatRulesCreateOrUpdateFuture.Result. 
+func (future *InboundNatRulesCreateOrUpdateFuture) result(client InboundNatRulesClient) (inr InboundNatRule, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InboundNatRulesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.InboundNatRulesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if inr.Response.Response, err = future.GetResult(sender); err == nil && inr.Response.Response.StatusCode != http.StatusNoContent { + inr, err = client.CreateOrUpdateResponder(inr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InboundNatRulesCreateOrUpdateFuture", "Result", inr.Response.Response, "Failure responding to request") + } + } + return +} + // InboundNatRulesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type InboundNatRulesDeleteFuture struct { @@ -13429,6 +15140,33 @@ type InboundNatRulesDeleteFuture struct { Result func(InboundNatRulesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *InboundNatRulesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for InboundNatRulesDeleteFuture.Result. +func (future *InboundNatRulesDeleteFuture) result(client InboundNatRulesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InboundNatRulesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.InboundNatRulesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // IntentPolicy network Intent Policy resource. type IntentPolicy struct { // Etag - A unique read-only string that changes whenever the resource is updated. @@ -14365,6 +16103,39 @@ type InterfacesCreateOrUpdateFuture struct { Result func(InterfacesClient) (Interface, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *InterfacesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for InterfacesCreateOrUpdateFuture.Result. 
+func (future *InterfacesCreateOrUpdateFuture) result(client InterfacesClient) (i Interface, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.InterfacesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if i.Response.Response, err = future.GetResult(sender); err == nil && i.Response.Response.StatusCode != http.StatusNoContent { + i, err = client.CreateOrUpdateResponder(i.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesCreateOrUpdateFuture", "Result", i.Response.Response, "Failure responding to request") + } + } + return +} + // InterfacesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type InterfacesDeleteFuture struct { @@ -14374,6 +16145,33 @@ type InterfacesDeleteFuture struct { Result func(InterfacesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *InterfacesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for InterfacesDeleteFuture.Result. +func (future *InterfacesDeleteFuture) result(client InterfacesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.InterfacesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // InterfacesGetEffectiveRouteTableFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type InterfacesGetEffectiveRouteTableFuture struct { @@ -14383,6 +16181,39 @@ type InterfacesGetEffectiveRouteTableFuture struct { Result func(InterfacesClient) (EffectiveRouteListResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *InterfacesGetEffectiveRouteTableFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for InterfacesGetEffectiveRouteTableFuture.Result. 
+func (future *InterfacesGetEffectiveRouteTableFuture) result(client InterfacesClient) (erlr EffectiveRouteListResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesGetEffectiveRouteTableFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.InterfacesGetEffectiveRouteTableFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if erlr.Response.Response, err = future.GetResult(sender); err == nil && erlr.Response.Response.StatusCode != http.StatusNoContent { + erlr, err = client.GetEffectiveRouteTableResponder(erlr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesGetEffectiveRouteTableFuture", "Result", erlr.Response.Response, "Failure responding to request") + } + } + return +} + // InterfacesListEffectiveNetworkSecurityGroupsFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type InterfacesListEffectiveNetworkSecurityGroupsFuture struct { @@ -14392,6 +16223,39 @@ type InterfacesListEffectiveNetworkSecurityGroupsFuture struct { Result func(InterfacesClient) (EffectiveNetworkSecurityGroupListResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *InterfacesListEffectiveNetworkSecurityGroupsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for InterfacesListEffectiveNetworkSecurityGroupsFuture.Result. +func (future *InterfacesListEffectiveNetworkSecurityGroupsFuture) result(client InterfacesClient) (ensglr EffectiveNetworkSecurityGroupListResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesListEffectiveNetworkSecurityGroupsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.InterfacesListEffectiveNetworkSecurityGroupsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ensglr.Response.Response, err = future.GetResult(sender); err == nil && ensglr.Response.Response.StatusCode != http.StatusNoContent { + ensglr, err = client.ListEffectiveNetworkSecurityGroupsResponder(ensglr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesListEffectiveNetworkSecurityGroupsFuture", "Result", ensglr.Response.Response, "Failure responding to request") + } + } + return +} + // InterfacesUpdateTagsFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type InterfacesUpdateTagsFuture struct { @@ -14401,6 +16265,39 @@ type InterfacesUpdateTagsFuture struct { Result func(InterfacesClient) (Interface, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *InterfacesUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for InterfacesUpdateTagsFuture.Result. +func (future *InterfacesUpdateTagsFuture) result(client InterfacesClient) (i Interface, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.InterfacesUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if i.Response.Response, err = future.GetResult(sender); err == nil && i.Response.Response.StatusCode != http.StatusNoContent { + i, err = client.UpdateTagsResponder(i.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesUpdateTagsFuture", "Result", i.Response.Response, "Failure responding to request") + } + } + return +} + // InterfaceTapConfiguration tap configuration in a Network Interface. type InterfaceTapConfiguration struct { autorest.Response `json:"-"` @@ -14689,6 +16586,39 @@ type InterfaceTapConfigurationsCreateOrUpdateFuture struct { Result func(InterfaceTapConfigurationsClient) (InterfaceTapConfiguration, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *InterfaceTapConfigurationsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for InterfaceTapConfigurationsCreateOrUpdateFuture.Result. +func (future *InterfaceTapConfigurationsCreateOrUpdateFuture) result(client InterfaceTapConfigurationsClient) (itc InterfaceTapConfiguration, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfaceTapConfigurationsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.InterfaceTapConfigurationsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if itc.Response.Response, err = future.GetResult(sender); err == nil && itc.Response.Response.StatusCode != http.StatusNoContent { + itc, err = client.CreateOrUpdateResponder(itc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfaceTapConfigurationsCreateOrUpdateFuture", "Result", itc.Response.Response, "Failure responding to request") + } + } + return +} + // InterfaceTapConfigurationsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. 
type InterfaceTapConfigurationsDeleteFuture struct { @@ -14698,6 +16628,33 @@ type InterfaceTapConfigurationsDeleteFuture struct { Result func(InterfaceTapConfigurationsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *InterfaceTapConfigurationsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for InterfaceTapConfigurationsDeleteFuture.Result. +func (future *InterfaceTapConfigurationsDeleteFuture) result(client InterfaceTapConfigurationsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfaceTapConfigurationsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.InterfaceTapConfigurationsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // IPAddressAvailabilityResult response for CheckIPAddressAvailability API service call. type IPAddressAvailabilityResult struct { autorest.Response `json:"-"` @@ -17737,6 +19694,39 @@ type LoadBalancersCreateOrUpdateFuture struct { Result func(LoadBalancersClient) (LoadBalancer, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *LoadBalancersCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for LoadBalancersCreateOrUpdateFuture.Result. +func (future *LoadBalancersCreateOrUpdateFuture) result(client LoadBalancersClient) (lb LoadBalancer, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.LoadBalancersCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if lb.Response.Response, err = future.GetResult(sender); err == nil && lb.Response.Response.StatusCode != http.StatusNoContent { + lb, err = client.CreateOrUpdateResponder(lb.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersCreateOrUpdateFuture", "Result", lb.Response.Response, "Failure responding to request") + } + } + return +} + // LoadBalancersDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type LoadBalancersDeleteFuture struct { @@ -17746,6 +19736,33 @@ type LoadBalancersDeleteFuture struct { Result func(LoadBalancersClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *LoadBalancersDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for LoadBalancersDeleteFuture.Result. +func (future *LoadBalancersDeleteFuture) result(client LoadBalancersClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.LoadBalancersDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // LoadBalancerSku SKU of a load balancer. type LoadBalancerSku struct { // Name - Name of a load balancer SKU. Possible values include: 'LoadBalancerSkuNameBasic', 'LoadBalancerSkuNameStandard' @@ -17761,6 +19778,39 @@ type LoadBalancersUpdateTagsFuture struct { Result func(LoadBalancersClient) (LoadBalancer, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *LoadBalancersUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for LoadBalancersUpdateTagsFuture.Result. +func (future *LoadBalancersUpdateTagsFuture) result(client LoadBalancersClient) (lb LoadBalancer, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.LoadBalancersUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if lb.Response.Response, err = future.GetResult(sender); err == nil && lb.Response.Response.StatusCode != http.StatusNoContent { + lb, err = client.UpdateTagsResponder(lb.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersUpdateTagsFuture", "Result", lb.Response.Response, "Failure responding to request") + } + } + return +} + // LoadBalancingRule a load balancing rule for a load balancer. type LoadBalancingRule struct { autorest.Response `json:"-"` @@ -18210,6 +20260,39 @@ type LocalNetworkGatewaysCreateOrUpdateFuture struct { Result func(LocalNetworkGatewaysClient) (LocalNetworkGateway, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *LocalNetworkGatewaysCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for LocalNetworkGatewaysCreateOrUpdateFuture.Result. 
+func (future *LocalNetworkGatewaysCreateOrUpdateFuture) result(client LocalNetworkGatewaysClient) (lng LocalNetworkGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.LocalNetworkGatewaysCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if lng.Response.Response, err = future.GetResult(sender); err == nil && lng.Response.Response.StatusCode != http.StatusNoContent { + lng, err = client.CreateOrUpdateResponder(lng.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysCreateOrUpdateFuture", "Result", lng.Response.Response, "Failure responding to request") + } + } + return +} + // LocalNetworkGatewaysDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type LocalNetworkGatewaysDeleteFuture struct { @@ -18219,6 +20302,33 @@ type LocalNetworkGatewaysDeleteFuture struct { Result func(LocalNetworkGatewaysClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *LocalNetworkGatewaysDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for LocalNetworkGatewaysDeleteFuture.Result. +func (future *LocalNetworkGatewaysDeleteFuture) result(client LocalNetworkGatewaysClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.LocalNetworkGatewaysDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // LocalNetworkGatewaysUpdateTagsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type LocalNetworkGatewaysUpdateTagsFuture struct { @@ -18228,6 +20338,39 @@ type LocalNetworkGatewaysUpdateTagsFuture struct { Result func(LocalNetworkGatewaysClient) (LocalNetworkGateway, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *LocalNetworkGatewaysUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for LocalNetworkGatewaysUpdateTagsFuture.Result. 
+func (future *LocalNetworkGatewaysUpdateTagsFuture) result(client LocalNetworkGatewaysClient) (lng LocalNetworkGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.LocalNetworkGatewaysUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if lng.Response.Response, err = future.GetResult(sender); err == nil && lng.Response.Response.StatusCode != http.StatusNoContent { + lng, err = client.UpdateTagsResponder(lng.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysUpdateTagsFuture", "Result", lng.Response.Response, "Failure responding to request") + } + } + return +} + // LogSpecification description of logging specification. type LogSpecification struct { // Name - The name of the specification. @@ -18683,6 +20826,39 @@ type NatGatewaysCreateOrUpdateFuture struct { Result func(NatGatewaysClient) (NatGateway, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *NatGatewaysCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for NatGatewaysCreateOrUpdateFuture.Result. +func (future *NatGatewaysCreateOrUpdateFuture) result(client NatGatewaysClient) (ng NatGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.NatGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.NatGatewaysCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ng.Response.Response, err = future.GetResult(sender); err == nil && ng.Response.Response.StatusCode != http.StatusNoContent { + ng, err = client.CreateOrUpdateResponder(ng.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.NatGatewaysCreateOrUpdateFuture", "Result", ng.Response.Response, "Failure responding to request") + } + } + return +} + // NatGatewaysDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type NatGatewaysDeleteFuture struct { @@ -18692,6 +20868,33 @@ type NatGatewaysDeleteFuture struct { Result func(NatGatewaysClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *NatGatewaysDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for NatGatewaysDeleteFuture.Result. 
+func (future *NatGatewaysDeleteFuture) result(client NatGatewaysClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.NatGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.NatGatewaysDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // NatGatewaySku SKU of nat gateway. type NatGatewaySku struct { // Name - Name of Nat Gateway SKU. Possible values include: 'Standard' @@ -19265,6 +21468,39 @@ type P2sVpnGatewaysCreateOrUpdateFuture struct { Result func(P2sVpnGatewaysClient) (P2SVpnGateway, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *P2sVpnGatewaysCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for P2sVpnGatewaysCreateOrUpdateFuture.Result. +func (future *P2sVpnGatewaysCreateOrUpdateFuture) result(client P2sVpnGatewaysClient) (pvg P2SVpnGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.P2sVpnGatewaysCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if pvg.Response.Response, err = future.GetResult(sender); err == nil && pvg.Response.Response.StatusCode != http.StatusNoContent { + pvg, err = client.CreateOrUpdateResponder(pvg.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysCreateOrUpdateFuture", "Result", pvg.Response.Response, "Failure responding to request") + } + } + return +} + // P2sVpnGatewaysDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type P2sVpnGatewaysDeleteFuture struct { @@ -19274,6 +21510,33 @@ type P2sVpnGatewaysDeleteFuture struct { Result func(P2sVpnGatewaysClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *P2sVpnGatewaysDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for P2sVpnGatewaysDeleteFuture.Result. +func (future *P2sVpnGatewaysDeleteFuture) result(client P2sVpnGatewaysClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.P2sVpnGatewaysDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // P2sVpnGatewaysGenerateVpnProfileFuture an abstraction for monitoring and retrieving the results of a // long-running operation. 
type P2sVpnGatewaysGenerateVpnProfileFuture struct { @@ -19283,6 +21546,39 @@ type P2sVpnGatewaysGenerateVpnProfileFuture struct { Result func(P2sVpnGatewaysClient) (VpnProfileResponse, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *P2sVpnGatewaysGenerateVpnProfileFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for P2sVpnGatewaysGenerateVpnProfileFuture.Result. +func (future *P2sVpnGatewaysGenerateVpnProfileFuture) result(client P2sVpnGatewaysClient) (vpr VpnProfileResponse, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysGenerateVpnProfileFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.P2sVpnGatewaysGenerateVpnProfileFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vpr.Response.Response, err = future.GetResult(sender); err == nil && vpr.Response.Response.StatusCode != http.StatusNoContent { + vpr, err = client.GenerateVpnProfileResponder(vpr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysGenerateVpnProfileFuture", "Result", vpr.Response.Response, "Failure responding to request") + } + } + return +} + // P2sVpnGatewaysGetP2sVpnConnectionHealthFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type P2sVpnGatewaysGetP2sVpnConnectionHealthFuture struct { @@ -19292,6 +21588,39 @@ type P2sVpnGatewaysGetP2sVpnConnectionHealthFuture struct { Result func(P2sVpnGatewaysClient) (P2SVpnGateway, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *P2sVpnGatewaysGetP2sVpnConnectionHealthFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for P2sVpnGatewaysGetP2sVpnConnectionHealthFuture.Result. 
+func (future *P2sVpnGatewaysGetP2sVpnConnectionHealthFuture) result(client P2sVpnGatewaysClient) (pvg P2SVpnGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysGetP2sVpnConnectionHealthFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.P2sVpnGatewaysGetP2sVpnConnectionHealthFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if pvg.Response.Response, err = future.GetResult(sender); err == nil && pvg.Response.Response.StatusCode != http.StatusNoContent { + pvg, err = client.GetP2sVpnConnectionHealthResponder(pvg.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysGetP2sVpnConnectionHealthFuture", "Result", pvg.Response.Response, "Failure responding to request") + } + } + return +} + // P2sVpnGatewaysUpdateTagsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type P2sVpnGatewaysUpdateTagsFuture struct { @@ -19301,6 +21630,39 @@ type P2sVpnGatewaysUpdateTagsFuture struct { Result func(P2sVpnGatewaysClient) (P2SVpnGateway, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *P2sVpnGatewaysUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for P2sVpnGatewaysUpdateTagsFuture.Result. +func (future *P2sVpnGatewaysUpdateTagsFuture) result(client P2sVpnGatewaysClient) (pvg P2SVpnGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.P2sVpnGatewaysUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if pvg.Response.Response, err = future.GetResult(sender); err == nil && pvg.Response.Response.StatusCode != http.StatusNoContent { + pvg, err = client.UpdateTagsResponder(pvg.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysUpdateTagsFuture", "Result", pvg.Response.Response, "Failure responding to request") + } + } + return +} + // P2SVpnProfileParameters vpn Client Parameters for package generation. type P2SVpnProfileParameters struct { // AuthenticationMethod - VPN client authentication method. Possible values include: 'EAPTLS', 'EAPMSCHAPv2' @@ -19659,6 +22021,39 @@ type P2sVpnServerConfigurationsCreateOrUpdateFuture struct { Result func(P2sVpnServerConfigurationsClient) (P2SVpnServerConfiguration, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *P2sVpnServerConfigurationsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for P2sVpnServerConfigurationsCreateOrUpdateFuture.Result. +func (future *P2sVpnServerConfigurationsCreateOrUpdateFuture) result(client P2sVpnServerConfigurationsClient) (pvsc P2SVpnServerConfiguration, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.P2sVpnServerConfigurationsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.P2sVpnServerConfigurationsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if pvsc.Response.Response, err = future.GetResult(sender); err == nil && pvsc.Response.Response.StatusCode != http.StatusNoContent { + pvsc, err = client.CreateOrUpdateResponder(pvsc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.P2sVpnServerConfigurationsCreateOrUpdateFuture", "Result", pvsc.Response.Response, "Failure responding to request") + } + } + return +} + // P2sVpnServerConfigurationsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type P2sVpnServerConfigurationsDeleteFuture struct { @@ -19668,6 +22063,33 @@ type P2sVpnServerConfigurationsDeleteFuture struct { Result func(P2sVpnServerConfigurationsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *P2sVpnServerConfigurationsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for P2sVpnServerConfigurationsDeleteFuture.Result. +func (future *P2sVpnServerConfigurationsDeleteFuture) result(client P2sVpnServerConfigurationsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.P2sVpnServerConfigurationsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.P2sVpnServerConfigurationsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // P2SVpnServerConfigVpnClientRevokedCertificate VPN client revoked certificate of // P2SVpnServerConfiguration. type P2SVpnServerConfigVpnClientRevokedCertificate struct { @@ -20063,6 +22485,39 @@ type PacketCapturesCreateFuture struct { Result func(PacketCapturesClient) (PacketCaptureResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PacketCapturesCreateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PacketCapturesCreateFuture.Result. 
+func (future *PacketCapturesCreateFuture) result(client PacketCapturesClient) (pcr PacketCaptureResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PacketCapturesCreateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PacketCapturesCreateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if pcr.Response.Response, err = future.GetResult(sender); err == nil && pcr.Response.Response.StatusCode != http.StatusNoContent { + pcr, err = client.CreateResponder(pcr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PacketCapturesCreateFuture", "Result", pcr.Response.Response, "Failure responding to request") + } + } + return +} + // PacketCapturesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type PacketCapturesDeleteFuture struct { @@ -20072,6 +22527,33 @@ type PacketCapturesDeleteFuture struct { Result func(PacketCapturesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PacketCapturesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PacketCapturesDeleteFuture.Result. +func (future *PacketCapturesDeleteFuture) result(client PacketCapturesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PacketCapturesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PacketCapturesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // PacketCapturesGetStatusFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type PacketCapturesGetStatusFuture struct { @@ -20081,6 +22563,39 @@ type PacketCapturesGetStatusFuture struct { Result func(PacketCapturesClient) (PacketCaptureQueryStatusResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PacketCapturesGetStatusFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PacketCapturesGetStatusFuture.Result. 
+func (future *PacketCapturesGetStatusFuture) result(client PacketCapturesClient) (pcqsr PacketCaptureQueryStatusResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PacketCapturesGetStatusFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PacketCapturesGetStatusFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if pcqsr.Response.Response, err = future.GetResult(sender); err == nil && pcqsr.Response.Response.StatusCode != http.StatusNoContent { + pcqsr, err = client.GetStatusResponder(pcqsr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PacketCapturesGetStatusFuture", "Result", pcqsr.Response.Response, "Failure responding to request") + } + } + return +} + // PacketCapturesStopFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type PacketCapturesStopFuture struct { @@ -20090,6 +22605,33 @@ type PacketCapturesStopFuture struct { Result func(PacketCapturesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PacketCapturesStopFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PacketCapturesStopFuture.Result. +func (future *PacketCapturesStopFuture) result(client PacketCapturesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PacketCapturesStopFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PacketCapturesStopFuture") + return + } + ar.Response = future.Response() + return +} + // PacketCaptureStorageLocation describes the storage location for a packet capture session. type PacketCaptureStorageLocation struct { // StorageID - The ID of the storage account to save the packet capture session. Required if no local file path is provided. @@ -21015,6 +23557,39 @@ type PrivateEndpointsCreateOrUpdateFuture struct { Result func(PrivateEndpointsClient) (PrivateEndpoint, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PrivateEndpointsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PrivateEndpointsCreateOrUpdateFuture.Result. 
+func (future *PrivateEndpointsCreateOrUpdateFuture) result(client PrivateEndpointsClient) (peVar PrivateEndpoint, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PrivateEndpointsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PrivateEndpointsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if peVar.Response.Response, err = future.GetResult(sender); err == nil && peVar.Response.Response.StatusCode != http.StatusNoContent { + peVar, err = client.CreateOrUpdateResponder(peVar.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PrivateEndpointsCreateOrUpdateFuture", "Result", peVar.Response.Response, "Failure responding to request") + } + } + return +} + // PrivateEndpointsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type PrivateEndpointsDeleteFuture struct { @@ -21024,6 +23599,33 @@ type PrivateEndpointsDeleteFuture struct { Result func(PrivateEndpointsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PrivateEndpointsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PrivateEndpointsDeleteFuture.Result. +func (future *PrivateEndpointsDeleteFuture) result(client PrivateEndpointsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PrivateEndpointsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PrivateEndpointsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // PrivateLinkService private link service resource. type PrivateLinkService struct { autorest.Response `json:"-"` @@ -21599,6 +24201,39 @@ type PrivateLinkServicesCheckPrivateLinkServiceVisibilityByResourceGroupFuture s Result func(PrivateLinkServicesClient) (PrivateLinkServiceVisibility, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PrivateLinkServicesCheckPrivateLinkServiceVisibilityByResourceGroupFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PrivateLinkServicesCheckPrivateLinkServiceVisibilityByResourceGroupFuture.Result. 
+func (future *PrivateLinkServicesCheckPrivateLinkServiceVisibilityByResourceGroupFuture) result(client PrivateLinkServicesClient) (plsv PrivateLinkServiceVisibility, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PrivateLinkServicesCheckPrivateLinkServiceVisibilityByResourceGroupFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PrivateLinkServicesCheckPrivateLinkServiceVisibilityByResourceGroupFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if plsv.Response.Response, err = future.GetResult(sender); err == nil && plsv.Response.Response.StatusCode != http.StatusNoContent { + plsv, err = client.CheckPrivateLinkServiceVisibilityByResourceGroupResponder(plsv.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PrivateLinkServicesCheckPrivateLinkServiceVisibilityByResourceGroupFuture", "Result", plsv.Response.Response, "Failure responding to request") + } + } + return +} + // PrivateLinkServicesCheckPrivateLinkServiceVisibilityFuture an abstraction for monitoring and retrieving // the results of a long-running operation. type PrivateLinkServicesCheckPrivateLinkServiceVisibilityFuture struct { @@ -21608,6 +24243,39 @@ type PrivateLinkServicesCheckPrivateLinkServiceVisibilityFuture struct { Result func(PrivateLinkServicesClient) (PrivateLinkServiceVisibility, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PrivateLinkServicesCheckPrivateLinkServiceVisibilityFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PrivateLinkServicesCheckPrivateLinkServiceVisibilityFuture.Result. +func (future *PrivateLinkServicesCheckPrivateLinkServiceVisibilityFuture) result(client PrivateLinkServicesClient) (plsv PrivateLinkServiceVisibility, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PrivateLinkServicesCheckPrivateLinkServiceVisibilityFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PrivateLinkServicesCheckPrivateLinkServiceVisibilityFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if plsv.Response.Response, err = future.GetResult(sender); err == nil && plsv.Response.Response.StatusCode != http.StatusNoContent { + plsv, err = client.CheckPrivateLinkServiceVisibilityResponder(plsv.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PrivateLinkServicesCheckPrivateLinkServiceVisibilityFuture", "Result", plsv.Response.Response, "Failure responding to request") + } + } + return +} + // PrivateLinkServicesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a // long-running operation. 
type PrivateLinkServicesCreateOrUpdateFuture struct { @@ -21617,6 +24285,39 @@ type PrivateLinkServicesCreateOrUpdateFuture struct { Result func(PrivateLinkServicesClient) (PrivateLinkService, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PrivateLinkServicesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PrivateLinkServicesCreateOrUpdateFuture.Result. +func (future *PrivateLinkServicesCreateOrUpdateFuture) result(client PrivateLinkServicesClient) (pls PrivateLinkService, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PrivateLinkServicesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PrivateLinkServicesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if pls.Response.Response, err = future.GetResult(sender); err == nil && pls.Response.Response.StatusCode != http.StatusNoContent { + pls, err = client.CreateOrUpdateResponder(pls.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PrivateLinkServicesCreateOrUpdateFuture", "Result", pls.Response.Response, "Failure responding to request") + } + } + return +} + // PrivateLinkServicesDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type PrivateLinkServicesDeleteFuture struct { @@ -21626,6 +24327,33 @@ type PrivateLinkServicesDeleteFuture struct { Result func(PrivateLinkServicesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PrivateLinkServicesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PrivateLinkServicesDeleteFuture.Result. +func (future *PrivateLinkServicesDeleteFuture) result(client PrivateLinkServicesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PrivateLinkServicesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PrivateLinkServicesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // PrivateLinkServicesDeletePrivateEndpointConnectionFuture an abstraction for monitoring and retrieving // the results of a long-running operation. type PrivateLinkServicesDeletePrivateEndpointConnectionFuture struct { @@ -21635,6 +24363,33 @@ type PrivateLinkServicesDeletePrivateEndpointConnectionFuture struct { Result func(PrivateLinkServicesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *PrivateLinkServicesDeletePrivateEndpointConnectionFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PrivateLinkServicesDeletePrivateEndpointConnectionFuture.Result. +func (future *PrivateLinkServicesDeletePrivateEndpointConnectionFuture) result(client PrivateLinkServicesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PrivateLinkServicesDeletePrivateEndpointConnectionFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PrivateLinkServicesDeletePrivateEndpointConnectionFuture") + return + } + ar.Response = future.Response() + return +} + // PrivateLinkServiceVisibility response for the CheckPrivateLinkServiceVisibility API service call. type PrivateLinkServiceVisibility struct { autorest.Response `json:"-"` @@ -22087,6 +24842,33 @@ type ProfilesDeleteFuture struct { Result func(ProfilesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ProfilesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ProfilesDeleteFuture.Result. +func (future *ProfilesDeleteFuture) result(client ProfilesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ProfilesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ProfilesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ProtocolConfiguration configuration of the protocol. type ProtocolConfiguration struct { // HTTPConfiguration - HTTP configuration of the connectivity check. @@ -22270,6 +25052,39 @@ type PublicIPAddressesCreateOrUpdateFuture struct { Result func(PublicIPAddressesClient) (PublicIPAddress, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PublicIPAddressesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PublicIPAddressesCreateOrUpdateFuture.Result. 
+func (future *PublicIPAddressesCreateOrUpdateFuture) result(client PublicIPAddressesClient) (pia PublicIPAddress, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PublicIPAddressesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if pia.Response.Response, err = future.GetResult(sender); err == nil && pia.Response.Response.StatusCode != http.StatusNoContent { + pia, err = client.CreateOrUpdateResponder(pia.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesCreateOrUpdateFuture", "Result", pia.Response.Response, "Failure responding to request") + } + } + return +} + // PublicIPAddressesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type PublicIPAddressesDeleteFuture struct { @@ -22279,6 +25094,33 @@ type PublicIPAddressesDeleteFuture struct { Result func(PublicIPAddressesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PublicIPAddressesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PublicIPAddressesDeleteFuture.Result. +func (future *PublicIPAddressesDeleteFuture) result(client PublicIPAddressesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PublicIPAddressesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // PublicIPAddressesUpdateTagsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type PublicIPAddressesUpdateTagsFuture struct { @@ -22288,6 +25130,39 @@ type PublicIPAddressesUpdateTagsFuture struct { Result func(PublicIPAddressesClient) (PublicIPAddress, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PublicIPAddressesUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PublicIPAddressesUpdateTagsFuture.Result. 
+func (future *PublicIPAddressesUpdateTagsFuture) result(client PublicIPAddressesClient) (pia PublicIPAddress, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PublicIPAddressesUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if pia.Response.Response, err = future.GetResult(sender); err == nil && pia.Response.Response.StatusCode != http.StatusNoContent { + pia, err = client.UpdateTagsResponder(pia.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesUpdateTagsFuture", "Result", pia.Response.Response, "Failure responding to request") + } + } + return +} + // PublicIPAddressListResult response for ListPublicIpAddresses API service call. type PublicIPAddressListResult struct { autorest.Response `json:"-"` @@ -22670,6 +25545,39 @@ type PublicIPPrefixesCreateOrUpdateFuture struct { Result func(PublicIPPrefixesClient) (PublicIPPrefix, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PublicIPPrefixesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PublicIPPrefixesCreateOrUpdateFuture.Result. +func (future *PublicIPPrefixesCreateOrUpdateFuture) result(client PublicIPPrefixesClient) (pip PublicIPPrefix, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PublicIPPrefixesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if pip.Response.Response, err = future.GetResult(sender); err == nil && pip.Response.Response.StatusCode != http.StatusNoContent { + pip, err = client.CreateOrUpdateResponder(pip.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesCreateOrUpdateFuture", "Result", pip.Response.Response, "Failure responding to request") + } + } + return +} + // PublicIPPrefixesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type PublicIPPrefixesDeleteFuture struct { @@ -22679,6 +25587,33 @@ type PublicIPPrefixesDeleteFuture struct { Result func(PublicIPPrefixesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PublicIPPrefixesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PublicIPPrefixesDeleteFuture.Result. 
+func (future *PublicIPPrefixesDeleteFuture) result(client PublicIPPrefixesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PublicIPPrefixesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // PublicIPPrefixesUpdateTagsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type PublicIPPrefixesUpdateTagsFuture struct { @@ -22688,6 +25623,39 @@ type PublicIPPrefixesUpdateTagsFuture struct { Result func(PublicIPPrefixesClient) (PublicIPPrefix, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PublicIPPrefixesUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PublicIPPrefixesUpdateTagsFuture.Result. +func (future *PublicIPPrefixesUpdateTagsFuture) result(client PublicIPPrefixesClient) (pip PublicIPPrefix, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PublicIPPrefixesUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if pip.Response.Response, err = future.GetResult(sender); err == nil && pip.Response.Response.StatusCode != http.StatusNoContent { + pip, err = client.UpdateTagsResponder(pip.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesUpdateTagsFuture", "Result", pip.Response.Response, "Failure responding to request") + } + } + return +} + // PublicIPPrefixListResult response for ListPublicIpPrefixes API service call. type PublicIPPrefixListResult struct { autorest.Response `json:"-"` @@ -23755,6 +26723,39 @@ type RouteFilterRulesCreateOrUpdateFuture struct { Result func(RouteFilterRulesClient) (RouteFilterRule, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RouteFilterRulesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RouteFilterRulesCreateOrUpdateFuture.Result. 
+func (future *RouteFilterRulesCreateOrUpdateFuture) result(client RouteFilterRulesClient) (rfr RouteFilterRule, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFilterRulesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.RouteFilterRulesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if rfr.Response.Response, err = future.GetResult(sender); err == nil && rfr.Response.Response.StatusCode != http.StatusNoContent { + rfr, err = client.CreateOrUpdateResponder(rfr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFilterRulesCreateOrUpdateFuture", "Result", rfr.Response.Response, "Failure responding to request") + } + } + return +} + // RouteFilterRulesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type RouteFilterRulesDeleteFuture struct { @@ -23764,6 +26765,33 @@ type RouteFilterRulesDeleteFuture struct { Result func(RouteFilterRulesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RouteFilterRulesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RouteFilterRulesDeleteFuture.Result. +func (future *RouteFilterRulesDeleteFuture) result(client RouteFilterRulesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFilterRulesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.RouteFilterRulesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // RouteFilterRulesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type RouteFilterRulesUpdateFuture struct { @@ -23773,6 +26801,39 @@ type RouteFilterRulesUpdateFuture struct { Result func(RouteFilterRulesClient) (RouteFilterRule, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RouteFilterRulesUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RouteFilterRulesUpdateFuture.Result. 
+func (future *RouteFilterRulesUpdateFuture) result(client RouteFilterRulesClient) (rfr RouteFilterRule, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFilterRulesUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.RouteFilterRulesUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if rfr.Response.Response, err = future.GetResult(sender); err == nil && rfr.Response.Response.StatusCode != http.StatusNoContent { + rfr, err = client.UpdateResponder(rfr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFilterRulesUpdateFuture", "Result", rfr.Response.Response, "Failure responding to request") + } + } + return +} + // RouteFiltersCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type RouteFiltersCreateOrUpdateFuture struct { @@ -23782,6 +26843,39 @@ type RouteFiltersCreateOrUpdateFuture struct { Result func(RouteFiltersClient) (RouteFilter, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RouteFiltersCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RouteFiltersCreateOrUpdateFuture.Result. +func (future *RouteFiltersCreateOrUpdateFuture) result(client RouteFiltersClient) (rf RouteFilter, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFiltersCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.RouteFiltersCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if rf.Response.Response, err = future.GetResult(sender); err == nil && rf.Response.Response.StatusCode != http.StatusNoContent { + rf, err = client.CreateOrUpdateResponder(rf.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFiltersCreateOrUpdateFuture", "Result", rf.Response.Response, "Failure responding to request") + } + } + return +} + // RouteFiltersDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type RouteFiltersDeleteFuture struct { @@ -23791,6 +26885,33 @@ type RouteFiltersDeleteFuture struct { Result func(RouteFiltersClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RouteFiltersDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RouteFiltersDeleteFuture.Result. 
+func (future *RouteFiltersDeleteFuture) result(client RouteFiltersClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFiltersDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.RouteFiltersDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // RouteFiltersUpdateFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type RouteFiltersUpdateFuture struct { @@ -23800,6 +26921,39 @@ type RouteFiltersUpdateFuture struct { Result func(RouteFiltersClient) (RouteFilter, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RouteFiltersUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RouteFiltersUpdateFuture.Result. +func (future *RouteFiltersUpdateFuture) result(client RouteFiltersClient) (rf RouteFilter, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFiltersUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.RouteFiltersUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if rf.Response.Response, err = future.GetResult(sender); err == nil && rf.Response.Response.StatusCode != http.StatusNoContent { + rf, err = client.UpdateResponder(rf.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFiltersUpdateFuture", "Result", rf.Response.Response, "Failure responding to request") + } + } + return +} + // RouteListResult response for the ListRoute API service call. type RouteListResult struct { autorest.Response `json:"-"` @@ -23980,6 +27134,39 @@ type RoutesCreateOrUpdateFuture struct { Result func(RoutesClient) (Route, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RoutesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RoutesCreateOrUpdateFuture.Result. 
+func (future *RoutesCreateOrUpdateFuture) result(client RoutesClient) (r Route, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.RoutesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if r.Response.Response, err = future.GetResult(sender); err == nil && r.Response.Response.StatusCode != http.StatusNoContent { + r, err = client.CreateOrUpdateResponder(r.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesCreateOrUpdateFuture", "Result", r.Response.Response, "Failure responding to request") + } + } + return +} + // RoutesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. type RoutesDeleteFuture struct { azure.FutureAPI @@ -23988,6 +27175,33 @@ type RoutesDeleteFuture struct { Result func(RoutesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RoutesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RoutesDeleteFuture.Result. +func (future *RoutesDeleteFuture) result(client RoutesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.RoutesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // RouteTable route table resource. type RouteTable struct { autorest.Response `json:"-"` @@ -24301,6 +27515,39 @@ type RouteTablesCreateOrUpdateFuture struct { Result func(RouteTablesClient) (RouteTable, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RouteTablesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RouteTablesCreateOrUpdateFuture.Result. 
+func (future *RouteTablesCreateOrUpdateFuture) result(client RouteTablesClient) (rt RouteTable, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.RouteTablesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if rt.Response.Response, err = future.GetResult(sender); err == nil && rt.Response.Response.StatusCode != http.StatusNoContent { + rt, err = client.CreateOrUpdateResponder(rt.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesCreateOrUpdateFuture", "Result", rt.Response.Response, "Failure responding to request") + } + } + return +} + // RouteTablesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type RouteTablesDeleteFuture struct { @@ -24310,6 +27557,33 @@ type RouteTablesDeleteFuture struct { Result func(RouteTablesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RouteTablesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RouteTablesDeleteFuture.Result. +func (future *RouteTablesDeleteFuture) result(client RouteTablesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.RouteTablesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // RouteTablesUpdateTagsFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type RouteTablesUpdateTagsFuture struct { @@ -24319,6 +27593,39 @@ type RouteTablesUpdateTagsFuture struct { Result func(RouteTablesClient) (RouteTable, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RouteTablesUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RouteTablesUpdateTagsFuture.Result. 
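For orientation, these generated futures are normally driven from calling code in two steps: wait for the long-running operation to reach a terminal state, then invoke the Result func that the UnmarshalJSON/result pair above wires up. A minimal, illustrative sketch, not part of the vendored SDK; the subscription ID, resource names, and authorizer are placeholders, and the usual context/network/autorest imports are assumed:

func exampleCreateRouteTable(ctx context.Context, authorizer autorest.Authorizer, subscriptionID string) (network.RouteTable, error) {
	client := network.NewRouteTablesClient(subscriptionID)
	client.Authorizer = authorizer

	// Begin the long-running operation; the returned future carries the polling state.
	future, err := client.CreateOrUpdate(ctx, "example-rg", "example-rt", network.RouteTable{ /* parameters elided */ })
	if err != nil {
		return network.RouteTable{}, err
	}
	// Poll until the operation completes, then decode the final resource via the Result func.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return network.RouteTable{}, err
	}
	return future.Result(client)
}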
+func (future *RouteTablesUpdateTagsFuture) result(client RouteTablesClient) (rt RouteTable, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.RouteTablesUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if rt.Response.Response, err = future.GetResult(sender); err == nil && rt.Response.Response.StatusCode != http.StatusNoContent { + rt, err = client.UpdateTagsResponder(rt.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesUpdateTagsFuture", "Result", rt.Response.Response, "Failure responding to request") + } + } + return +} + // RuleCondition rule condition of type network. type RuleCondition struct { // IPProtocols - Array of FirewallPolicyRuleConditionNetworkProtocols. @@ -24730,6 +28037,39 @@ type SecurityGroupsCreateOrUpdateFuture struct { Result func(SecurityGroupsClient) (SecurityGroup, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SecurityGroupsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SecurityGroupsCreateOrUpdateFuture.Result. +func (future *SecurityGroupsCreateOrUpdateFuture) result(client SecurityGroupsClient) (sg SecurityGroup, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.SecurityGroupsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if sg.Response.Response, err = future.GetResult(sender); err == nil && sg.Response.Response.StatusCode != http.StatusNoContent { + sg, err = client.CreateOrUpdateResponder(sg.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsCreateOrUpdateFuture", "Result", sg.Response.Response, "Failure responding to request") + } + } + return +} + // SecurityGroupsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type SecurityGroupsDeleteFuture struct { @@ -24739,6 +28079,33 @@ type SecurityGroupsDeleteFuture struct { Result func(SecurityGroupsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SecurityGroupsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SecurityGroupsDeleteFuture.Result. 
+func (future *SecurityGroupsDeleteFuture) result(client SecurityGroupsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.SecurityGroupsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // SecurityGroupsUpdateTagsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type SecurityGroupsUpdateTagsFuture struct { @@ -24748,6 +28115,39 @@ type SecurityGroupsUpdateTagsFuture struct { Result func(SecurityGroupsClient) (SecurityGroup, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SecurityGroupsUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SecurityGroupsUpdateTagsFuture.Result. +func (future *SecurityGroupsUpdateTagsFuture) result(client SecurityGroupsClient) (sg SecurityGroup, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.SecurityGroupsUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if sg.Response.Response, err = future.GetResult(sender); err == nil && sg.Response.Response.StatusCode != http.StatusNoContent { + sg, err = client.UpdateTagsResponder(sg.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsUpdateTagsFuture", "Result", sg.Response.Response, "Failure responding to request") + } + } + return +} + // SecurityGroupViewParameters parameters that define the VM to check security groups for. type SecurityGroupViewParameters struct { // TargetResourceID - ID of the target VM. @@ -25060,6 +28460,39 @@ type SecurityRulesCreateOrUpdateFuture struct { Result func(SecurityRulesClient) (SecurityRule, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SecurityRulesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SecurityRulesCreateOrUpdateFuture.Result. 
+func (future *SecurityRulesCreateOrUpdateFuture) result(client SecurityRulesClient) (sr SecurityRule, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityRulesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.SecurityRulesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if sr.Response.Response, err = future.GetResult(sender); err == nil && sr.Response.Response.StatusCode != http.StatusNoContent { + sr, err = client.CreateOrUpdateResponder(sr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityRulesCreateOrUpdateFuture", "Result", sr.Response.Response, "Failure responding to request") + } + } + return +} + // SecurityRulesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type SecurityRulesDeleteFuture struct { @@ -25069,6 +28502,33 @@ type SecurityRulesDeleteFuture struct { Result func(SecurityRulesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SecurityRulesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SecurityRulesDeleteFuture.Result. +func (future *SecurityRulesDeleteFuture) result(client SecurityRulesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityRulesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.SecurityRulesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // SecurityRulesEvaluationResult network security rules evaluation result. type SecurityRulesEvaluationResult struct { // Name - Name of the network security rule. @@ -25258,6 +28718,39 @@ type ServiceEndpointPoliciesCreateOrUpdateFuture struct { Result func(ServiceEndpointPoliciesClient) (ServiceEndpointPolicy, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ServiceEndpointPoliciesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ServiceEndpointPoliciesCreateOrUpdateFuture.Result. 
+func (future *ServiceEndpointPoliciesCreateOrUpdateFuture) result(client ServiceEndpointPoliciesClient) (sep ServiceEndpointPolicy, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ServiceEndpointPoliciesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if sep.Response.Response, err = future.GetResult(sender); err == nil && sep.Response.Response.StatusCode != http.StatusNoContent { + sep, err = client.CreateOrUpdateResponder(sep.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesCreateOrUpdateFuture", "Result", sep.Response.Response, "Failure responding to request") + } + } + return +} + // ServiceEndpointPoliciesDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ServiceEndpointPoliciesDeleteFuture struct { @@ -25267,6 +28760,33 @@ type ServiceEndpointPoliciesDeleteFuture struct { Result func(ServiceEndpointPoliciesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ServiceEndpointPoliciesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ServiceEndpointPoliciesDeleteFuture.Result. +func (future *ServiceEndpointPoliciesDeleteFuture) result(client ServiceEndpointPoliciesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ServiceEndpointPoliciesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ServiceEndpointPoliciesUpdateFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ServiceEndpointPoliciesUpdateFuture struct { @@ -25276,6 +28796,39 @@ type ServiceEndpointPoliciesUpdateFuture struct { Result func(ServiceEndpointPoliciesClient) (ServiceEndpointPolicy, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ServiceEndpointPoliciesUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ServiceEndpointPoliciesUpdateFuture.Result. 
+func (future *ServiceEndpointPoliciesUpdateFuture) result(client ServiceEndpointPoliciesClient) (sep ServiceEndpointPolicy, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ServiceEndpointPoliciesUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if sep.Response.Response, err = future.GetResult(sender); err == nil && sep.Response.Response.StatusCode != http.StatusNoContent { + sep, err = client.UpdateResponder(sep.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesUpdateFuture", "Result", sep.Response.Response, "Failure responding to request") + } + } + return +} + // ServiceEndpointPolicy service End point policy resource. type ServiceEndpointPolicy struct { autorest.Response `json:"-"` @@ -25673,6 +29226,39 @@ type ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture struct { Result func(ServiceEndpointPolicyDefinitionsClient) (ServiceEndpointPolicyDefinition, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture.Result. +func (future *ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture) result(client ServiceEndpointPolicyDefinitionsClient) (sepd ServiceEndpointPolicyDefinition, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if sepd.Response.Response, err = future.GetResult(sender); err == nil && sepd.Response.Response.StatusCode != http.StatusNoContent { + sepd, err = client.CreateOrUpdateResponder(sepd.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture", "Result", sepd.Response.Response, "Failure responding to request") + } + } + return +} + // ServiceEndpointPolicyDefinitionsDeleteFuture an abstraction for monitoring and retrieving the results of // a long-running operation. type ServiceEndpointPolicyDefinitionsDeleteFuture struct { @@ -25682,6 +29268,33 @@ type ServiceEndpointPolicyDefinitionsDeleteFuture struct { Result func(ServiceEndpointPolicyDefinitionsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *ServiceEndpointPolicyDefinitionsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ServiceEndpointPolicyDefinitionsDeleteFuture.Result. +func (future *ServiceEndpointPolicyDefinitionsDeleteFuture) result(client ServiceEndpointPolicyDefinitionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ServiceEndpointPolicyDefinitionsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ServiceEndpointPolicyListResult response for ListServiceEndpointPolicies API service call. type ServiceEndpointPolicyListResult struct { autorest.Response `json:"-"` @@ -26278,6 +29891,39 @@ type SubnetsCreateOrUpdateFuture struct { Result func(SubnetsClient) (Subnet, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SubnetsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SubnetsCreateOrUpdateFuture.Result. +func (future *SubnetsCreateOrUpdateFuture) result(client SubnetsClient) (s Subnet, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.SubnetsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if s.Response.Response, err = future.GetResult(sender); err == nil && s.Response.Response.StatusCode != http.StatusNoContent { + s, err = client.CreateOrUpdateResponder(s.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsCreateOrUpdateFuture", "Result", s.Response.Response, "Failure responding to request") + } + } + return +} + // SubnetsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type SubnetsDeleteFuture struct { @@ -26287,6 +29933,33 @@ type SubnetsDeleteFuture struct { Result func(SubnetsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SubnetsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SubnetsDeleteFuture.Result. 
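The custom UnmarshalJSON methods exist so that a future's polling state can be persisted and later rehydrated: the func-typed Result field cannot be serialised, so unmarshalling restores the embedded azure.Future and re-binds Result to the generated result implementation. An illustrative sketch, not part of the vendored SDK; it assumes saved holds previously persisted polling-state JSON and that encoding/json, context, and the network package are imported:

func exampleResumeSubnetOperation(ctx context.Context, client network.SubnetsClient, saved []byte) (network.Subnet, error) {
	var future network.SubnetsCreateOrUpdateFuture
	// Invokes the custom UnmarshalJSON above, restoring FutureAPI and Result.
	if err := json.Unmarshal(saved, &future); err != nil {
		return network.Subnet{}, err
	}
	// Resume polling from the restored state, then decode the final Subnet.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return network.Subnet{}, err
	}
	return future.Result(client)
}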
+func (future *SubnetsDeleteFuture) result(client SubnetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.SubnetsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // SubnetsPrepareNetworkPoliciesFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type SubnetsPrepareNetworkPoliciesFuture struct { @@ -26296,6 +29969,33 @@ type SubnetsPrepareNetworkPoliciesFuture struct { Result func(SubnetsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SubnetsPrepareNetworkPoliciesFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SubnetsPrepareNetworkPoliciesFuture.Result. +func (future *SubnetsPrepareNetworkPoliciesFuture) result(client SubnetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsPrepareNetworkPoliciesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.SubnetsPrepareNetworkPoliciesFuture") + return + } + ar.Response = future.Response() + return +} + // SubnetsUnprepareNetworkPoliciesFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type SubnetsUnprepareNetworkPoliciesFuture struct { @@ -26305,6 +30005,33 @@ type SubnetsUnprepareNetworkPoliciesFuture struct { Result func(SubnetsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *SubnetsUnprepareNetworkPoliciesFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for SubnetsUnprepareNetworkPoliciesFuture.Result. +func (future *SubnetsUnprepareNetworkPoliciesFuture) result(client SubnetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsUnprepareNetworkPoliciesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.SubnetsUnprepareNetworkPoliciesFuture") + return + } + ar.Response = future.Response() + return +} + // SubResource reference to another subresource. type SubResource struct { // ID - Resource ID. @@ -26920,6 +30647,39 @@ type VirtualHubsCreateOrUpdateFuture struct { Result func(VirtualHubsClient) (VirtualHub, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *VirtualHubsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualHubsCreateOrUpdateFuture.Result. +func (future *VirtualHubsCreateOrUpdateFuture) result(client VirtualHubsClient) (vh VirtualHub, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualHubsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualHubsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vh.Response.Response, err = future.GetResult(sender); err == nil && vh.Response.Response.StatusCode != http.StatusNoContent { + vh, err = client.CreateOrUpdateResponder(vh.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualHubsCreateOrUpdateFuture", "Result", vh.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualHubsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VirtualHubsDeleteFuture struct { @@ -26929,6 +30689,33 @@ type VirtualHubsDeleteFuture struct { Result func(VirtualHubsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualHubsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualHubsDeleteFuture.Result. +func (future *VirtualHubsDeleteFuture) result(client VirtualHubsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualHubsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualHubsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualHubsUpdateTagsFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VirtualHubsUpdateTagsFuture struct { @@ -26938,6 +30725,39 @@ type VirtualHubsUpdateTagsFuture struct { Result func(VirtualHubsClient) (VirtualHub, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualHubsUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualHubsUpdateTagsFuture.Result. 
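Note that the resource-returning result funcs above fetch the final resource through a sender decorated with DoRetryForStatusCodes, so that last GET honours the client's retry settings. A small illustrative sketch, not part of the vendored SDK; the retry values are arbitrary examples and the time/autorest/network imports are assumed:

func exampleTunedVirtualHubsClient(subscriptionID string, authorizer autorest.Authorizer) network.VirtualHubsClient {
	client := network.NewVirtualHubsClient(subscriptionID)
	client.Authorizer = authorizer
	// These autorest.Client fields feed the DoRetryForStatusCodes decoration used by result.
	client.RetryAttempts = 5
	client.RetryDuration = 10 * time.Second
	return client
}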
+func (future *VirtualHubsUpdateTagsFuture) result(client VirtualHubsClient) (vh VirtualHub, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualHubsUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualHubsUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vh.Response.Response, err = future.GetResult(sender); err == nil && vh.Response.Response.StatusCode != http.StatusNoContent { + vh, err = client.UpdateTagsResponder(vh.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualHubsUpdateTagsFuture", "Result", vh.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetwork virtual Network resource. type VirtualNetwork struct { autorest.Response `json:"-"` @@ -27785,6 +31605,39 @@ type VirtualNetworkGatewayConnectionsCreateOrUpdateFuture struct { Result func(VirtualNetworkGatewayConnectionsClient) (VirtualNetworkGatewayConnection, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewayConnectionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewayConnectionsCreateOrUpdateFuture.Result. +func (future *VirtualNetworkGatewayConnectionsCreateOrUpdateFuture) result(client VirtualNetworkGatewayConnectionsClient) (vngc VirtualNetworkGatewayConnection, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vngc.Response.Response, err = future.GetResult(sender); err == nil && vngc.Response.Response.StatusCode != http.StatusNoContent { + vngc, err = client.CreateOrUpdateResponder(vngc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsCreateOrUpdateFuture", "Result", vngc.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewayConnectionsDeleteFuture an abstraction for monitoring and retrieving the results of // a long-running operation. type VirtualNetworkGatewayConnectionsDeleteFuture struct { @@ -27794,6 +31647,33 @@ type VirtualNetworkGatewayConnectionsDeleteFuture struct { Result func(VirtualNetworkGatewayConnectionsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *VirtualNetworkGatewayConnectionsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewayConnectionsDeleteFuture.Result. +func (future *VirtualNetworkGatewayConnectionsDeleteFuture) result(client VirtualNetworkGatewayConnectionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualNetworkGatewayConnectionsResetSharedKeyFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type VirtualNetworkGatewayConnectionsResetSharedKeyFuture struct { @@ -27803,6 +31683,39 @@ type VirtualNetworkGatewayConnectionsResetSharedKeyFuture struct { Result func(VirtualNetworkGatewayConnectionsClient) (ConnectionResetSharedKey, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewayConnectionsResetSharedKeyFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewayConnectionsResetSharedKeyFuture.Result. +func (future *VirtualNetworkGatewayConnectionsResetSharedKeyFuture) result(client VirtualNetworkGatewayConnectionsClient) (crsk ConnectionResetSharedKey, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsResetSharedKeyFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsResetSharedKeyFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if crsk.Response.Response, err = future.GetResult(sender); err == nil && crsk.Response.Response.StatusCode != http.StatusNoContent { + crsk, err = client.ResetSharedKeyResponder(crsk.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsResetSharedKeyFuture", "Result", crsk.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewayConnectionsSetSharedKeyFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type VirtualNetworkGatewayConnectionsSetSharedKeyFuture struct { @@ -27812,6 +31725,39 @@ type VirtualNetworkGatewayConnectionsSetSharedKeyFuture struct { Result func(VirtualNetworkGatewayConnectionsClient) (ConnectionSharedKey, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *VirtualNetworkGatewayConnectionsSetSharedKeyFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewayConnectionsSetSharedKeyFuture.Result. +func (future *VirtualNetworkGatewayConnectionsSetSharedKeyFuture) result(client VirtualNetworkGatewayConnectionsClient) (csk ConnectionSharedKey, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsSetSharedKeyFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsSetSharedKeyFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if csk.Response.Response, err = future.GetResult(sender); err == nil && csk.Response.Response.StatusCode != http.StatusNoContent { + csk, err = client.SetSharedKeyResponder(csk.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsSetSharedKeyFuture", "Result", csk.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewayConnectionsStartPacketCaptureFuture an abstraction for monitoring and retrieving // the results of a long-running operation. type VirtualNetworkGatewayConnectionsStartPacketCaptureFuture struct { @@ -27821,6 +31767,39 @@ type VirtualNetworkGatewayConnectionsStartPacketCaptureFuture struct { Result func(VirtualNetworkGatewayConnectionsClient) (String, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewayConnectionsStartPacketCaptureFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewayConnectionsStartPacketCaptureFuture.Result. 
+func (future *VirtualNetworkGatewayConnectionsStartPacketCaptureFuture) result(client VirtualNetworkGatewayConnectionsClient) (s String, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsStartPacketCaptureFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsStartPacketCaptureFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if s.Response.Response, err = future.GetResult(sender); err == nil && s.Response.Response.StatusCode != http.StatusNoContent { + s, err = client.StartPacketCaptureResponder(s.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsStartPacketCaptureFuture", "Result", s.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewayConnectionsStopPacketCaptureFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type VirtualNetworkGatewayConnectionsStopPacketCaptureFuture struct { @@ -27830,6 +31809,39 @@ type VirtualNetworkGatewayConnectionsStopPacketCaptureFuture struct { Result func(VirtualNetworkGatewayConnectionsClient) (String, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewayConnectionsStopPacketCaptureFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewayConnectionsStopPacketCaptureFuture.Result. +func (future *VirtualNetworkGatewayConnectionsStopPacketCaptureFuture) result(client VirtualNetworkGatewayConnectionsClient) (s String, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsStopPacketCaptureFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsStopPacketCaptureFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if s.Response.Response, err = future.GetResult(sender); err == nil && s.Response.Response.StatusCode != http.StatusNoContent { + s, err = client.StopPacketCaptureResponder(s.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsStopPacketCaptureFuture", "Result", s.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewayConnectionsUpdateTagsFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type VirtualNetworkGatewayConnectionsUpdateTagsFuture struct { @@ -27839,6 +31851,39 @@ type VirtualNetworkGatewayConnectionsUpdateTagsFuture struct { Result func(VirtualNetworkGatewayConnectionsClient) (VirtualNetworkGatewayConnection, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *VirtualNetworkGatewayConnectionsUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewayConnectionsUpdateTagsFuture.Result. +func (future *VirtualNetworkGatewayConnectionsUpdateTagsFuture) result(client VirtualNetworkGatewayConnectionsClient) (vngc VirtualNetworkGatewayConnection, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vngc.Response.Response, err = future.GetResult(sender); err == nil && vngc.Response.Response.StatusCode != http.StatusNoContent { + vngc, err = client.UpdateTagsResponder(vngc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsUpdateTagsFuture", "Result", vngc.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewayIPConfiguration IP configuration for virtual network gateway. type VirtualNetworkGatewayIPConfiguration struct { // VirtualNetworkGatewayIPConfigurationPropertiesFormat - Properties of the virtual network gateway ip configuration. @@ -28368,6 +32413,39 @@ type VirtualNetworkGatewaysCreateOrUpdateFuture struct { Result func(VirtualNetworkGatewaysClient) (VirtualNetworkGateway, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewaysCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysCreateOrUpdateFuture.Result. +func (future *VirtualNetworkGatewaysCreateOrUpdateFuture) result(client VirtualNetworkGatewaysClient) (vng VirtualNetworkGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vng.Response.Response, err = future.GetResult(sender); err == nil && vng.Response.Response.StatusCode != http.StatusNoContent { + vng, err = client.CreateOrUpdateResponder(vng.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysCreateOrUpdateFuture", "Result", vng.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewaysDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. 
type VirtualNetworkGatewaysDeleteFuture struct { @@ -28377,6 +32455,33 @@ type VirtualNetworkGatewaysDeleteFuture struct { Result func(VirtualNetworkGatewaysClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewaysDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysDeleteFuture.Result. +func (future *VirtualNetworkGatewaysDeleteFuture) result(client VirtualNetworkGatewaysClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualNetworkGatewaysGeneratevpnclientpackageFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type VirtualNetworkGatewaysGeneratevpnclientpackageFuture struct { @@ -28386,6 +32491,39 @@ type VirtualNetworkGatewaysGeneratevpnclientpackageFuture struct { Result func(VirtualNetworkGatewaysClient) (String, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewaysGeneratevpnclientpackageFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysGeneratevpnclientpackageFuture.Result. +func (future *VirtualNetworkGatewaysGeneratevpnclientpackageFuture) result(client VirtualNetworkGatewaysClient) (s String, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGeneratevpnclientpackageFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGeneratevpnclientpackageFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if s.Response.Response, err = future.GetResult(sender); err == nil && s.Response.Response.StatusCode != http.StatusNoContent { + s, err = client.GeneratevpnclientpackageResponder(s.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGeneratevpnclientpackageFuture", "Result", s.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewaysGenerateVpnProfileFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type VirtualNetworkGatewaysGenerateVpnProfileFuture struct { @@ -28395,6 +32533,39 @@ type VirtualNetworkGatewaysGenerateVpnProfileFuture struct { Result func(VirtualNetworkGatewaysClient) (String, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *VirtualNetworkGatewaysGenerateVpnProfileFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysGenerateVpnProfileFuture.Result. +func (future *VirtualNetworkGatewaysGenerateVpnProfileFuture) result(client VirtualNetworkGatewaysClient) (s String, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGenerateVpnProfileFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGenerateVpnProfileFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if s.Response.Response, err = future.GetResult(sender); err == nil && s.Response.Response.StatusCode != http.StatusNoContent { + s, err = client.GenerateVpnProfileResponder(s.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGenerateVpnProfileFuture", "Result", s.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewaysGetAdvertisedRoutesFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type VirtualNetworkGatewaysGetAdvertisedRoutesFuture struct { @@ -28404,6 +32575,39 @@ type VirtualNetworkGatewaysGetAdvertisedRoutesFuture struct { Result func(VirtualNetworkGatewaysClient) (GatewayRouteListResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewaysGetAdvertisedRoutesFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysGetAdvertisedRoutesFuture.Result. +func (future *VirtualNetworkGatewaysGetAdvertisedRoutesFuture) result(client VirtualNetworkGatewaysClient) (grlr GatewayRouteListResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetAdvertisedRoutesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetAdvertisedRoutesFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if grlr.Response.Response, err = future.GetResult(sender); err == nil && grlr.Response.Response.StatusCode != http.StatusNoContent { + grlr, err = client.GetAdvertisedRoutesResponder(grlr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetAdvertisedRoutesFuture", "Result", grlr.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewaysGetBgpPeerStatusFuture an abstraction for monitoring and retrieving the results of // a long-running operation. 
type VirtualNetworkGatewaysGetBgpPeerStatusFuture struct { @@ -28413,6 +32617,39 @@ type VirtualNetworkGatewaysGetBgpPeerStatusFuture struct { Result func(VirtualNetworkGatewaysClient) (BgpPeerStatusListResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewaysGetBgpPeerStatusFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysGetBgpPeerStatusFuture.Result. +func (future *VirtualNetworkGatewaysGetBgpPeerStatusFuture) result(client VirtualNetworkGatewaysClient) (bpslr BgpPeerStatusListResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetBgpPeerStatusFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetBgpPeerStatusFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if bpslr.Response.Response, err = future.GetResult(sender); err == nil && bpslr.Response.Response.StatusCode != http.StatusNoContent { + bpslr, err = client.GetBgpPeerStatusResponder(bpslr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetBgpPeerStatusFuture", "Result", bpslr.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewaysGetLearnedRoutesFuture an abstraction for monitoring and retrieving the results of // a long-running operation. type VirtualNetworkGatewaysGetLearnedRoutesFuture struct { @@ -28422,6 +32659,39 @@ type VirtualNetworkGatewaysGetLearnedRoutesFuture struct { Result func(VirtualNetworkGatewaysClient) (GatewayRouteListResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewaysGetLearnedRoutesFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysGetLearnedRoutesFuture.Result. 
+func (future *VirtualNetworkGatewaysGetLearnedRoutesFuture) result(client VirtualNetworkGatewaysClient) (grlr GatewayRouteListResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetLearnedRoutesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetLearnedRoutesFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if grlr.Response.Response, err = future.GetResult(sender); err == nil && grlr.Response.Response.StatusCode != http.StatusNoContent { + grlr, err = client.GetLearnedRoutesResponder(grlr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetLearnedRoutesFuture", "Result", grlr.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewaysGetVpnclientConnectionHealthFuture an abstraction for monitoring and retrieving // the results of a long-running operation. type VirtualNetworkGatewaysGetVpnclientConnectionHealthFuture struct { @@ -28431,6 +32701,39 @@ type VirtualNetworkGatewaysGetVpnclientConnectionHealthFuture struct { Result func(VirtualNetworkGatewaysClient) (VpnClientConnectionHealthDetailListResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewaysGetVpnclientConnectionHealthFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysGetVpnclientConnectionHealthFuture.Result. +func (future *VirtualNetworkGatewaysGetVpnclientConnectionHealthFuture) result(client VirtualNetworkGatewaysClient) (vcchdlr VpnClientConnectionHealthDetailListResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetVpnclientConnectionHealthFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetVpnclientConnectionHealthFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vcchdlr.Response.Response, err = future.GetResult(sender); err == nil && vcchdlr.Response.Response.StatusCode != http.StatusNoContent { + vcchdlr, err = client.GetVpnclientConnectionHealthResponder(vcchdlr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetVpnclientConnectionHealthFuture", "Result", vcchdlr.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture an abstraction for monitoring and retrieving the // results of a long-running operation. 
type VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture struct { @@ -28440,6 +32743,39 @@ type VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture struct { Result func(VirtualNetworkGatewaysClient) (VpnClientIPsecParameters, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture.Result. +func (future *VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture) result(client VirtualNetworkGatewaysClient) (vcipp VpnClientIPsecParameters, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vcipp.Response.Response, err = future.GetResult(sender); err == nil && vcipp.Response.Response.StatusCode != http.StatusNoContent { + vcipp, err = client.GetVpnclientIpsecParametersResponder(vcipp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture", "Result", vcipp.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewaysGetVpnProfilePackageURLFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type VirtualNetworkGatewaysGetVpnProfilePackageURLFuture struct { @@ -28449,6 +32785,39 @@ type VirtualNetworkGatewaysGetVpnProfilePackageURLFuture struct { Result func(VirtualNetworkGatewaysClient) (String, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewaysGetVpnProfilePackageURLFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysGetVpnProfilePackageURLFuture.Result. 
+func (future *VirtualNetworkGatewaysGetVpnProfilePackageURLFuture) result(client VirtualNetworkGatewaysClient) (s String, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetVpnProfilePackageURLFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetVpnProfilePackageURLFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if s.Response.Response, err = future.GetResult(sender); err == nil && s.Response.Response.StatusCode != http.StatusNoContent { + s, err = client.GetVpnProfilePackageURLResponder(s.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetVpnProfilePackageURLFuture", "Result", s.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewaySku virtualNetworkGatewaySku details. type VirtualNetworkGatewaySku struct { // Name - Gateway SKU name. Possible values include: 'VirtualNetworkGatewaySkuNameBasic', 'VirtualNetworkGatewaySkuNameHighPerformance', 'VirtualNetworkGatewaySkuNameStandard', 'VirtualNetworkGatewaySkuNameUltraPerformance', 'VirtualNetworkGatewaySkuNameVpnGw1', 'VirtualNetworkGatewaySkuNameVpnGw2', 'VirtualNetworkGatewaySkuNameVpnGw3', 'VirtualNetworkGatewaySkuNameVpnGw4', 'VirtualNetworkGatewaySkuNameVpnGw5', 'VirtualNetworkGatewaySkuNameVpnGw1AZ', 'VirtualNetworkGatewaySkuNameVpnGw2AZ', 'VirtualNetworkGatewaySkuNameVpnGw3AZ', 'VirtualNetworkGatewaySkuNameVpnGw4AZ', 'VirtualNetworkGatewaySkuNameVpnGw5AZ', 'VirtualNetworkGatewaySkuNameErGw1AZ', 'VirtualNetworkGatewaySkuNameErGw2AZ', 'VirtualNetworkGatewaySkuNameErGw3AZ' @@ -28468,6 +32837,39 @@ type VirtualNetworkGatewaysResetFuture struct { Result func(VirtualNetworkGatewaysClient) (VirtualNetworkGateway, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewaysResetFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysResetFuture.Result. 
+func (future *VirtualNetworkGatewaysResetFuture) result(client VirtualNetworkGatewaysClient) (vng VirtualNetworkGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysResetFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysResetFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vng.Response.Response, err = future.GetResult(sender); err == nil && vng.Response.Response.StatusCode != http.StatusNoContent { + vng, err = client.ResetResponder(vng.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysResetFuture", "Result", vng.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewaysResetVpnClientSharedKeyFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type VirtualNetworkGatewaysResetVpnClientSharedKeyFuture struct { @@ -28477,6 +32879,33 @@ type VirtualNetworkGatewaysResetVpnClientSharedKeyFuture struct { Result func(VirtualNetworkGatewaysClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewaysResetVpnClientSharedKeyFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysResetVpnClientSharedKeyFuture.Result. +func (future *VirtualNetworkGatewaysResetVpnClientSharedKeyFuture) result(client VirtualNetworkGatewaysClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysResetVpnClientSharedKeyFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysResetVpnClientSharedKeyFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture struct { @@ -28486,6 +32915,39 @@ type VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture struct { Result func(VirtualNetworkGatewaysClient) (VpnClientIPsecParameters, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture.Result. 
+func (future *VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture) result(client VirtualNetworkGatewaysClient) (vcipp VpnClientIPsecParameters, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vcipp.Response.Response, err = future.GetResult(sender); err == nil && vcipp.Response.Response.StatusCode != http.StatusNoContent { + vcipp, err = client.SetVpnclientIpsecParametersResponder(vcipp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture", "Result", vcipp.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewaysStartPacketCaptureFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type VirtualNetworkGatewaysStartPacketCaptureFuture struct { @@ -28495,6 +32957,39 @@ type VirtualNetworkGatewaysStartPacketCaptureFuture struct { Result func(VirtualNetworkGatewaysClient) (String, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewaysStartPacketCaptureFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysStartPacketCaptureFuture.Result. +func (future *VirtualNetworkGatewaysStartPacketCaptureFuture) result(client VirtualNetworkGatewaysClient) (s String, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysStartPacketCaptureFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysStartPacketCaptureFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if s.Response.Response, err = future.GetResult(sender); err == nil && s.Response.Response.StatusCode != http.StatusNoContent { + s, err = client.StartPacketCaptureResponder(s.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysStartPacketCaptureFuture", "Result", s.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewaysStopPacketCaptureFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type VirtualNetworkGatewaysStopPacketCaptureFuture struct { @@ -28504,6 +32999,39 @@ type VirtualNetworkGatewaysStopPacketCaptureFuture struct { Result func(VirtualNetworkGatewaysClient) (String, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *VirtualNetworkGatewaysStopPacketCaptureFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysStopPacketCaptureFuture.Result. +func (future *VirtualNetworkGatewaysStopPacketCaptureFuture) result(client VirtualNetworkGatewaysClient) (s String, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysStopPacketCaptureFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysStopPacketCaptureFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if s.Response.Response, err = future.GetResult(sender); err == nil && s.Response.Response.StatusCode != http.StatusNoContent { + s, err = client.StopPacketCaptureResponder(s.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysStopPacketCaptureFuture", "Result", s.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkGatewaysUpdateTagsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualNetworkGatewaysUpdateTagsFuture struct { @@ -28513,6 +33041,39 @@ type VirtualNetworkGatewaysUpdateTagsFuture struct { Result func(VirtualNetworkGatewaysClient) (VirtualNetworkGateway, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkGatewaysUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkGatewaysUpdateTagsFuture.Result. +func (future *VirtualNetworkGatewaysUpdateTagsFuture) result(client VirtualNetworkGatewaysClient) (vng VirtualNetworkGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vng.Response.Response, err = future.GetResult(sender); err == nil && vng.Response.Response.StatusCode != http.StatusNoContent { + vng, err = client.UpdateTagsResponder(vng.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysUpdateTagsFuture", "Result", vng.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkListResult response for the ListVirtualNetworks API service call. 
type VirtualNetworkListResult struct { autorest.Response `json:"-"` @@ -29113,6 +33674,39 @@ type VirtualNetworkPeeringsCreateOrUpdateFuture struct { Result func(VirtualNetworkPeeringsClient) (VirtualNetworkPeering, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkPeeringsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkPeeringsCreateOrUpdateFuture.Result. +func (future *VirtualNetworkPeeringsCreateOrUpdateFuture) result(client VirtualNetworkPeeringsClient) (vnp VirtualNetworkPeering, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkPeeringsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vnp.Response.Response, err = future.GetResult(sender); err == nil && vnp.Response.Response.StatusCode != http.StatusNoContent { + vnp, err = client.CreateOrUpdateResponder(vnp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsCreateOrUpdateFuture", "Result", vnp.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkPeeringsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualNetworkPeeringsDeleteFuture struct { @@ -29122,6 +33716,33 @@ type VirtualNetworkPeeringsDeleteFuture struct { Result func(VirtualNetworkPeeringsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkPeeringsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkPeeringsDeleteFuture.Result. +func (future *VirtualNetworkPeeringsDeleteFuture) result(client VirtualNetworkPeeringsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkPeeringsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualNetworkPropertiesFormat properties of the virtual network. type VirtualNetworkPropertiesFormat struct { // AddressSpace - The AddressSpace that contains an array of IP address ranges that can be used by subnets. @@ -29153,6 +33774,39 @@ type VirtualNetworksCreateOrUpdateFuture struct { Result func(VirtualNetworksClient) (VirtualNetwork, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *VirtualNetworksCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworksCreateOrUpdateFuture.Result. +func (future *VirtualNetworksCreateOrUpdateFuture) result(client VirtualNetworksClient) (vn VirtualNetwork, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworksCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vn.Response.Response, err = future.GetResult(sender); err == nil && vn.Response.Response.StatusCode != http.StatusNoContent { + vn, err = client.CreateOrUpdateResponder(vn.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksCreateOrUpdateFuture", "Result", vn.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworksDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VirtualNetworksDeleteFuture struct { @@ -29162,6 +33816,33 @@ type VirtualNetworksDeleteFuture struct { Result func(VirtualNetworksClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworksDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworksDeleteFuture.Result. +func (future *VirtualNetworksDeleteFuture) result(client VirtualNetworksClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworksDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualNetworksUpdateTagsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualNetworksUpdateTagsFuture struct { @@ -29171,6 +33852,39 @@ type VirtualNetworksUpdateTagsFuture struct { Result func(VirtualNetworksClient) (VirtualNetwork, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworksUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworksUpdateTagsFuture.Result. 
+func (future *VirtualNetworksUpdateTagsFuture) result(client VirtualNetworksClient) (vn VirtualNetwork, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworksUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vn.Response.Response, err = future.GetResult(sender); err == nil && vn.Response.Response.StatusCode != http.StatusNoContent { + vn, err = client.UpdateTagsResponder(vn.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksUpdateTagsFuture", "Result", vn.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkTap virtual Network Tap resource. type VirtualNetworkTap struct { autorest.Response `json:"-"` @@ -29488,6 +34202,39 @@ type VirtualNetworkTapsCreateOrUpdateFuture struct { Result func(VirtualNetworkTapsClient) (VirtualNetworkTap, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkTapsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkTapsCreateOrUpdateFuture.Result. +func (future *VirtualNetworkTapsCreateOrUpdateFuture) result(client VirtualNetworkTapsClient) (vnt VirtualNetworkTap, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkTapsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vnt.Response.Response, err = future.GetResult(sender); err == nil && vnt.Response.Response.StatusCode != http.StatusNoContent { + vnt, err = client.CreateOrUpdateResponder(vnt.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsCreateOrUpdateFuture", "Result", vnt.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkTapsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualNetworkTapsDeleteFuture struct { @@ -29497,6 +34244,33 @@ type VirtualNetworkTapsDeleteFuture struct { Result func(VirtualNetworkTapsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkTapsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkTapsDeleteFuture.Result. 
+func (future *VirtualNetworkTapsDeleteFuture) result(client VirtualNetworkTapsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkTapsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualNetworkTapsUpdateTagsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualNetworkTapsUpdateTagsFuture struct { @@ -29506,6 +34280,39 @@ type VirtualNetworkTapsUpdateTagsFuture struct { Result func(VirtualNetworkTapsClient) (VirtualNetworkTap, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkTapsUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkTapsUpdateTagsFuture.Result. +func (future *VirtualNetworkTapsUpdateTagsFuture) result(client VirtualNetworkTapsClient) (vnt VirtualNetworkTap, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkTapsUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vnt.Response.Response, err = future.GetResult(sender); err == nil && vnt.Response.Response.StatusCode != http.StatusNoContent { + vnt, err = client.UpdateTagsResponder(vnt.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsUpdateTagsFuture", "Result", vnt.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkUsage usage details for subnet. type VirtualNetworkUsage struct { // CurrentValue - READ-ONLY; Indicates number of IPs used from the Subnet. @@ -30083,6 +34890,39 @@ type VirtualRouterPeeringsCreateOrUpdateFuture struct { Result func(VirtualRouterPeeringsClient) (VirtualRouterPeering, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualRouterPeeringsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualRouterPeeringsCreateOrUpdateFuture.Result. 
+func (future *VirtualRouterPeeringsCreateOrUpdateFuture) result(client VirtualRouterPeeringsClient) (vrp VirtualRouterPeering, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualRouterPeeringsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualRouterPeeringsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vrp.Response.Response, err = future.GetResult(sender); err == nil && vrp.Response.Response.StatusCode != http.StatusNoContent { + vrp, err = client.CreateOrUpdateResponder(vrp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualRouterPeeringsCreateOrUpdateFuture", "Result", vrp.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualRouterPeeringsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualRouterPeeringsDeleteFuture struct { @@ -30092,6 +34932,33 @@ type VirtualRouterPeeringsDeleteFuture struct { Result func(VirtualRouterPeeringsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualRouterPeeringsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualRouterPeeringsDeleteFuture.Result. +func (future *VirtualRouterPeeringsDeleteFuture) result(client VirtualRouterPeeringsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualRouterPeeringsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualRouterPeeringsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualRouterPropertiesFormat virtual Router definition type VirtualRouterPropertiesFormat struct { // VirtualRouterAsn - VirtualRouter ASN. @@ -30135,6 +35002,39 @@ type VirtualRoutersCreateOrUpdateFuture struct { Result func(VirtualRoutersClient) (VirtualRouter, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualRoutersCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualRoutersCreateOrUpdateFuture.Result. 
+func (future *VirtualRoutersCreateOrUpdateFuture) result(client VirtualRoutersClient) (vr VirtualRouter, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualRoutersCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualRoutersCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vr.Response.Response, err = future.GetResult(sender); err == nil && vr.Response.Response.StatusCode != http.StatusNoContent { + vr, err = client.CreateOrUpdateResponder(vr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualRoutersCreateOrUpdateFuture", "Result", vr.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualRoutersDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VirtualRoutersDeleteFuture struct { @@ -30144,6 +35044,33 @@ type VirtualRoutersDeleteFuture struct { Result func(VirtualRoutersClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualRoutersDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualRoutersDeleteFuture.Result. +func (future *VirtualRoutersDeleteFuture) result(client VirtualRoutersClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualRoutersDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualRoutersDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualWAN virtualWAN Resource. type VirtualWAN struct { autorest.Response `json:"-"` @@ -30317,6 +35244,39 @@ type VirtualWansCreateOrUpdateFuture struct { Result func(VirtualWansClient) (VirtualWAN, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualWansCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualWansCreateOrUpdateFuture.Result. 
+func (future *VirtualWansCreateOrUpdateFuture) result(client VirtualWansClient) (vw VirtualWAN, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualWansCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualWansCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vw.Response.Response, err = future.GetResult(sender); err == nil && vw.Response.Response.StatusCode != http.StatusNoContent { + vw, err = client.CreateOrUpdateResponder(vw.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualWansCreateOrUpdateFuture", "Result", vw.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualWansDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VirtualWansDeleteFuture struct { @@ -30326,6 +35286,33 @@ type VirtualWansDeleteFuture struct { Result func(VirtualWansClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualWansDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualWansDeleteFuture.Result. +func (future *VirtualWansDeleteFuture) result(client VirtualWansClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualWansDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualWansDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualWanSecurityProvider collection of SecurityProviders. type VirtualWanSecurityProvider struct { // Name - Name of the security provider. @@ -30352,6 +35339,39 @@ type VirtualWansUpdateTagsFuture struct { Result func(VirtualWansClient) (VirtualWAN, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualWansUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualWansUpdateTagsFuture.Result. 
+func (future *VirtualWansUpdateTagsFuture) result(client VirtualWansClient) (vw VirtualWAN, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualWansUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualWansUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vw.Response.Response, err = future.GetResult(sender); err == nil && vw.Response.Response.StatusCode != http.StatusNoContent { + vw, err = client.UpdateTagsResponder(vw.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualWansUpdateTagsFuture", "Result", vw.Response.Response, "Failure responding to request") + } + } + return +} + // VpnClientConfiguration vpnClientConfiguration for P2S client. type VpnClientConfiguration struct { // VpnClientAddressPool - The reference of the address space resource which represents Address space for P2S VpnClient. @@ -30837,6 +35857,39 @@ type VpnConnectionsCreateOrUpdateFuture struct { Result func(VpnConnectionsClient) (VpnConnection, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VpnConnectionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VpnConnectionsCreateOrUpdateFuture.Result. +func (future *VpnConnectionsCreateOrUpdateFuture) result(client VpnConnectionsClient) (vc VpnConnection, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnConnectionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VpnConnectionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vc.Response.Response, err = future.GetResult(sender); err == nil && vc.Response.Response.StatusCode != http.StatusNoContent { + vc, err = client.CreateOrUpdateResponder(vc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnConnectionsCreateOrUpdateFuture", "Result", vc.Response.Response, "Failure responding to request") + } + } + return +} + // VpnConnectionsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VpnConnectionsDeleteFuture struct { @@ -30846,6 +35899,33 @@ type VpnConnectionsDeleteFuture struct { Result func(VpnConnectionsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VpnConnectionsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VpnConnectionsDeleteFuture.Result. 
+func (future *VpnConnectionsDeleteFuture) result(client VpnConnectionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnConnectionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VpnConnectionsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VpnDeviceScriptParameters vpn device configuration script generation parameters. type VpnDeviceScriptParameters struct { // Vendor - The vendor for the vpn device. @@ -30994,6 +36074,39 @@ type VpnGatewaysCreateOrUpdateFuture struct { Result func(VpnGatewaysClient) (VpnGateway, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VpnGatewaysCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VpnGatewaysCreateOrUpdateFuture.Result. +func (future *VpnGatewaysCreateOrUpdateFuture) result(client VpnGatewaysClient) (vg VpnGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VpnGatewaysCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vg.Response.Response, err = future.GetResult(sender); err == nil && vg.Response.Response.StatusCode != http.StatusNoContent { + vg, err = client.CreateOrUpdateResponder(vg.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnGatewaysCreateOrUpdateFuture", "Result", vg.Response.Response, "Failure responding to request") + } + } + return +} + // VpnGatewaysDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VpnGatewaysDeleteFuture struct { @@ -31003,6 +36116,33 @@ type VpnGatewaysDeleteFuture struct { Result func(VpnGatewaysClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VpnGatewaysDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VpnGatewaysDeleteFuture.Result. +func (future *VpnGatewaysDeleteFuture) result(client VpnGatewaysClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VpnGatewaysDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VpnGatewaysResetFuture an abstraction for monitoring and retrieving the results of a long-running // operation. 
type VpnGatewaysResetFuture struct { @@ -31012,6 +36152,39 @@ type VpnGatewaysResetFuture struct { Result func(VpnGatewaysClient) (VpnGateway, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VpnGatewaysResetFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VpnGatewaysResetFuture.Result. +func (future *VpnGatewaysResetFuture) result(client VpnGatewaysClient) (vg VpnGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnGatewaysResetFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VpnGatewaysResetFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vg.Response.Response, err = future.GetResult(sender); err == nil && vg.Response.Response.StatusCode != http.StatusNoContent { + vg, err = client.ResetResponder(vg.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnGatewaysResetFuture", "Result", vg.Response.Response, "Failure responding to request") + } + } + return +} + // VpnGatewaysUpdateTagsFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VpnGatewaysUpdateTagsFuture struct { @@ -31021,6 +36194,39 @@ type VpnGatewaysUpdateTagsFuture struct { Result func(VpnGatewaysClient) (VpnGateway, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VpnGatewaysUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VpnGatewaysUpdateTagsFuture.Result. +func (future *VpnGatewaysUpdateTagsFuture) result(client VpnGatewaysClient) (vg VpnGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnGatewaysUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VpnGatewaysUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vg.Response.Response, err = future.GetResult(sender); err == nil && vg.Response.Response.StatusCode != http.StatusNoContent { + vg, err = client.UpdateTagsResponder(vg.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnGatewaysUpdateTagsFuture", "Result", vg.Response.Response, "Failure responding to request") + } + } + return +} + // VpnLinkBgpSettings BGP settings details for a link. type VpnLinkBgpSettings struct { // Asn - The BGP speaker's ASN. @@ -31474,6 +36680,33 @@ type VpnSitesConfigurationDownloadFuture struct { Result func(VpnSitesConfigurationClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *VpnSitesConfigurationDownloadFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VpnSitesConfigurationDownloadFuture.Result. +func (future *VpnSitesConfigurationDownloadFuture) result(client VpnSitesConfigurationClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnSitesConfigurationDownloadFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VpnSitesConfigurationDownloadFuture") + return + } + ar.Response = future.Response() + return +} + // VpnSitesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VpnSitesCreateOrUpdateFuture struct { @@ -31483,6 +36716,39 @@ type VpnSitesCreateOrUpdateFuture struct { Result func(VpnSitesClient) (VpnSite, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VpnSitesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VpnSitesCreateOrUpdateFuture.Result. +func (future *VpnSitesCreateOrUpdateFuture) result(client VpnSitesClient) (vs VpnSite, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnSitesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VpnSitesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vs.Response.Response, err = future.GetResult(sender); err == nil && vs.Response.Response.StatusCode != http.StatusNoContent { + vs, err = client.CreateOrUpdateResponder(vs.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnSitesCreateOrUpdateFuture", "Result", vs.Response.Response, "Failure responding to request") + } + } + return +} + // VpnSitesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VpnSitesDeleteFuture struct { @@ -31492,6 +36758,33 @@ type VpnSitesDeleteFuture struct { Result func(VpnSitesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VpnSitesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VpnSitesDeleteFuture.Result. 
+func (future *VpnSitesDeleteFuture) result(client VpnSitesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnSitesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VpnSitesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VpnSitesUpdateTagsFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VpnSitesUpdateTagsFuture struct { @@ -31501,6 +36794,39 @@ type VpnSitesUpdateTagsFuture struct { Result func(VpnSitesClient) (VpnSite, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VpnSitesUpdateTagsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VpnSitesUpdateTagsFuture.Result. +func (future *VpnSitesUpdateTagsFuture) result(client VpnSitesClient) (vs VpnSite, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnSitesUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VpnSitesUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vs.Response.Response, err = future.GetResult(sender); err == nil && vs.Response.Response.StatusCode != http.StatusNoContent { + vs, err = client.UpdateTagsResponder(vs.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VpnSitesUpdateTagsFuture", "Result", vs.Response.Response, "Failure responding to request") + } + } + return +} + // Watcher network watcher in a resource group. type Watcher struct { autorest.Response `json:"-"` @@ -31641,6 +36967,39 @@ type WatchersCheckConnectivityFuture struct { Result func(WatchersClient) (ConnectivityInformation, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *WatchersCheckConnectivityFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WatchersCheckConnectivityFuture.Result. 
+func (future *WatchersCheckConnectivityFuture) result(client WatchersClient) (ci ConnectivityInformation, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersCheckConnectivityFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.WatchersCheckConnectivityFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ci.Response.Response, err = future.GetResult(sender); err == nil && ci.Response.Response.StatusCode != http.StatusNoContent { + ci, err = client.CheckConnectivityResponder(ci.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersCheckConnectivityFuture", "Result", ci.Response.Response, "Failure responding to request") + } + } + return +} + // WatchersDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type WatchersDeleteFuture struct { @@ -31650,6 +37009,33 @@ type WatchersDeleteFuture struct { Result func(WatchersClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *WatchersDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WatchersDeleteFuture.Result. +func (future *WatchersDeleteFuture) result(client WatchersClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.WatchersDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // WatchersGetAzureReachabilityReportFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type WatchersGetAzureReachabilityReportFuture struct { @@ -31659,6 +37045,39 @@ type WatchersGetAzureReachabilityReportFuture struct { Result func(WatchersClient) (AzureReachabilityReport, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *WatchersGetAzureReachabilityReportFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WatchersGetAzureReachabilityReportFuture.Result. 
+func (future *WatchersGetAzureReachabilityReportFuture) result(client WatchersClient) (arr AzureReachabilityReport, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetAzureReachabilityReportFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.WatchersGetAzureReachabilityReportFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if arr.Response.Response, err = future.GetResult(sender); err == nil && arr.Response.Response.StatusCode != http.StatusNoContent { + arr, err = client.GetAzureReachabilityReportResponder(arr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetAzureReachabilityReportFuture", "Result", arr.Response.Response, "Failure responding to request") + } + } + return +} + // WatchersGetFlowLogStatusFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type WatchersGetFlowLogStatusFuture struct { @@ -31668,6 +37087,39 @@ type WatchersGetFlowLogStatusFuture struct { Result func(WatchersClient) (FlowLogInformation, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *WatchersGetFlowLogStatusFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WatchersGetFlowLogStatusFuture.Result. +func (future *WatchersGetFlowLogStatusFuture) result(client WatchersClient) (fli FlowLogInformation, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetFlowLogStatusFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.WatchersGetFlowLogStatusFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if fli.Response.Response, err = future.GetResult(sender); err == nil && fli.Response.Response.StatusCode != http.StatusNoContent { + fli, err = client.GetFlowLogStatusResponder(fli.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetFlowLogStatusFuture", "Result", fli.Response.Response, "Failure responding to request") + } + } + return +} + // WatchersGetNetworkConfigurationDiagnosticFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type WatchersGetNetworkConfigurationDiagnosticFuture struct { @@ -31677,6 +37129,39 @@ type WatchersGetNetworkConfigurationDiagnosticFuture struct { Result func(WatchersClient) (ConfigurationDiagnosticResponse, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *WatchersGetNetworkConfigurationDiagnosticFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WatchersGetNetworkConfigurationDiagnosticFuture.Result. +func (future *WatchersGetNetworkConfigurationDiagnosticFuture) result(client WatchersClient) (cdr ConfigurationDiagnosticResponse, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetNetworkConfigurationDiagnosticFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.WatchersGetNetworkConfigurationDiagnosticFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if cdr.Response.Response, err = future.GetResult(sender); err == nil && cdr.Response.Response.StatusCode != http.StatusNoContent { + cdr, err = client.GetNetworkConfigurationDiagnosticResponder(cdr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetNetworkConfigurationDiagnosticFuture", "Result", cdr.Response.Response, "Failure responding to request") + } + } + return +} + // WatchersGetNextHopFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type WatchersGetNextHopFuture struct { @@ -31686,6 +37171,39 @@ type WatchersGetNextHopFuture struct { Result func(WatchersClient) (NextHopResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *WatchersGetNextHopFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WatchersGetNextHopFuture.Result. +func (future *WatchersGetNextHopFuture) result(client WatchersClient) (nhr NextHopResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetNextHopFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.WatchersGetNextHopFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if nhr.Response.Response, err = future.GetResult(sender); err == nil && nhr.Response.Response.StatusCode != http.StatusNoContent { + nhr, err = client.GetNextHopResponder(nhr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetNextHopFuture", "Result", nhr.Response.Response, "Failure responding to request") + } + } + return +} + // WatchersGetTroubleshootingFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type WatchersGetTroubleshootingFuture struct { @@ -31695,6 +37213,39 @@ type WatchersGetTroubleshootingFuture struct { Result func(WatchersClient) (TroubleshootingResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *WatchersGetTroubleshootingFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WatchersGetTroubleshootingFuture.Result. +func (future *WatchersGetTroubleshootingFuture) result(client WatchersClient) (tr TroubleshootingResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetTroubleshootingFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.WatchersGetTroubleshootingFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if tr.Response.Response, err = future.GetResult(sender); err == nil && tr.Response.Response.StatusCode != http.StatusNoContent { + tr, err = client.GetTroubleshootingResponder(tr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetTroubleshootingFuture", "Result", tr.Response.Response, "Failure responding to request") + } + } + return +} + // WatchersGetTroubleshootingResultFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type WatchersGetTroubleshootingResultFuture struct { @@ -31704,6 +37255,39 @@ type WatchersGetTroubleshootingResultFuture struct { Result func(WatchersClient) (TroubleshootingResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *WatchersGetTroubleshootingResultFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WatchersGetTroubleshootingResultFuture.Result. +func (future *WatchersGetTroubleshootingResultFuture) result(client WatchersClient) (tr TroubleshootingResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetTroubleshootingResultFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.WatchersGetTroubleshootingResultFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if tr.Response.Response, err = future.GetResult(sender); err == nil && tr.Response.Response.StatusCode != http.StatusNoContent { + tr, err = client.GetTroubleshootingResultResponder(tr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetTroubleshootingResultFuture", "Result", tr.Response.Response, "Failure responding to request") + } + } + return +} + // WatchersGetVMSecurityRulesFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type WatchersGetVMSecurityRulesFuture struct { @@ -31713,6 +37297,39 @@ type WatchersGetVMSecurityRulesFuture struct { Result func(WatchersClient) (SecurityGroupViewResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *WatchersGetVMSecurityRulesFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WatchersGetVMSecurityRulesFuture.Result. +func (future *WatchersGetVMSecurityRulesFuture) result(client WatchersClient) (sgvr SecurityGroupViewResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetVMSecurityRulesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.WatchersGetVMSecurityRulesFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if sgvr.Response.Response, err = future.GetResult(sender); err == nil && sgvr.Response.Response.StatusCode != http.StatusNoContent { + sgvr, err = client.GetVMSecurityRulesResponder(sgvr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersGetVMSecurityRulesFuture", "Result", sgvr.Response.Response, "Failure responding to request") + } + } + return +} + // WatchersListAvailableProvidersFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type WatchersListAvailableProvidersFuture struct { @@ -31722,6 +37339,39 @@ type WatchersListAvailableProvidersFuture struct { Result func(WatchersClient) (AvailableProvidersList, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *WatchersListAvailableProvidersFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WatchersListAvailableProvidersFuture.Result. +func (future *WatchersListAvailableProvidersFuture) result(client WatchersClient) (apl AvailableProvidersList, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersListAvailableProvidersFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.WatchersListAvailableProvidersFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if apl.Response.Response, err = future.GetResult(sender); err == nil && apl.Response.Response.StatusCode != http.StatusNoContent { + apl, err = client.ListAvailableProvidersResponder(apl.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersListAvailableProvidersFuture", "Result", apl.Response.Response, "Failure responding to request") + } + } + return +} + // WatchersSetFlowLogConfigurationFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type WatchersSetFlowLogConfigurationFuture struct { @@ -31731,6 +37381,39 @@ type WatchersSetFlowLogConfigurationFuture struct { Result func(WatchersClient) (FlowLogInformation, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *WatchersSetFlowLogConfigurationFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WatchersSetFlowLogConfigurationFuture.Result. +func (future *WatchersSetFlowLogConfigurationFuture) result(client WatchersClient) (fli FlowLogInformation, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersSetFlowLogConfigurationFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.WatchersSetFlowLogConfigurationFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if fli.Response.Response, err = future.GetResult(sender); err == nil && fli.Response.Response.StatusCode != http.StatusNoContent { + fli, err = client.SetFlowLogConfigurationResponder(fli.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersSetFlowLogConfigurationFuture", "Result", fli.Response.Response, "Failure responding to request") + } + } + return +} + // WatchersVerifyIPFlowFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type WatchersVerifyIPFlowFuture struct { @@ -31740,6 +37423,39 @@ type WatchersVerifyIPFlowFuture struct { Result func(WatchersClient) (VerificationIPFlowResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *WatchersVerifyIPFlowFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WatchersVerifyIPFlowFuture.Result. +func (future *WatchersVerifyIPFlowFuture) result(client WatchersClient) (vifr VerificationIPFlowResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersVerifyIPFlowFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.WatchersVerifyIPFlowFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vifr.Response.Response, err = future.GetResult(sender); err == nil && vifr.Response.Response.StatusCode != http.StatusNoContent { + vifr, err = client.VerifyIPFlowResponder(vifr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersVerifyIPFlowFuture", "Result", vifr.Response.Response, "Failure responding to request") + } + } + return +} + // WebApplicationFirewallCustomRule defines contents of a web application rule. type WebApplicationFirewallCustomRule struct { // Name - The name of the resource that is unique within a policy. This name can be used to access the resource. 
@@ -31786,6 +37502,33 @@ type WebApplicationFirewallPoliciesDeleteFuture struct { Result func(WebApplicationFirewallPoliciesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *WebApplicationFirewallPoliciesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WebApplicationFirewallPoliciesDeleteFuture.Result. +func (future *WebApplicationFirewallPoliciesDeleteFuture) result(client WebApplicationFirewallPoliciesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WebApplicationFirewallPoliciesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.WebApplicationFirewallPoliciesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // WebApplicationFirewallPolicy defines web application firewall policy. type WebApplicationFirewallPolicy struct { autorest.Response `json:"-"` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/natgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/natgateways.go index fef68c2029b..fa997a008d3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/natgateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/natgateways.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -106,30 +95,7 @@ func (client NatGatewaysClient) CreateOrUpdateSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client NatGatewaysClient) (ng NatGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.NatGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.NatGatewaysCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ng.Response.Response, err = future.GetResult(sender) - if ng.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.NatGatewaysCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && ng.Response.Response.StatusCode != http.StatusNoContent { - ng, err = client.CreateOrUpdateResponder(ng.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.NatGatewaysCreateOrUpdateFuture", "Result", ng.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -207,20 +173,7 @@ func (client NatGatewaysClient) DeleteSender(req *http.Request) (future NatGatew var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client NatGatewaysClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.NatGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.NatGatewaysDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/operations.go index 72815e49d79..b5b1ca5faad 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/operations.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/p2svpngateways.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/p2svpngateways.go index 33de6e18a0b..c4080b027aa 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/p2svpngateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/p2svpngateways.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -107,30 +96,7 @@ func (client P2sVpnGatewaysClient) CreateOrUpdateSender(req *http.Request) (futu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client P2sVpnGatewaysClient) (pvg P2SVpnGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.P2sVpnGatewaysCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - pvg.Response.Response, err = future.GetResult(sender) - if pvg.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && pvg.Response.Response.StatusCode != http.StatusNoContent { - pvg, err = client.CreateOrUpdateResponder(pvg.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysCreateOrUpdateFuture", "Result", pvg.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -208,20 +174,7 @@ func (client P2sVpnGatewaysClient) DeleteSender(req *http.Request) (future P2sVp var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client P2sVpnGatewaysClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.P2sVpnGatewaysDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result 
return } @@ -301,30 +254,7 @@ func (client P2sVpnGatewaysClient) GenerateVpnProfileSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client P2sVpnGatewaysClient) (vpr VpnProfileResponse, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysGenerateVpnProfileFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.P2sVpnGatewaysGenerateVpnProfileFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vpr.Response.Response, err = future.GetResult(sender) - if vpr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysGenerateVpnProfileFuture", "Result", nil, "received nil response and error") - } - if err == nil && vpr.Response.Response.StatusCode != http.StatusNoContent { - vpr, err = client.GenerateVpnProfileResponder(vpr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysGenerateVpnProfileFuture", "Result", vpr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -479,30 +409,7 @@ func (client P2sVpnGatewaysClient) GetP2sVpnConnectionHealthSender(req *http.Req var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client P2sVpnGatewaysClient) (pvg P2SVpnGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysGetP2sVpnConnectionHealthFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.P2sVpnGatewaysGetP2sVpnConnectionHealthFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - pvg.Response.Response, err = future.GetResult(sender) - if pvg.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysGetP2sVpnConnectionHealthFuture", "Result", nil, "received nil response and error") - } - if err == nil && pvg.Response.Response.StatusCode != http.StatusNoContent { - pvg, err = client.GetP2sVpnConnectionHealthResponder(pvg.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysGetP2sVpnConnectionHealthFuture", "Result", pvg.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -812,30 +719,7 @@ func (client P2sVpnGatewaysClient) UpdateTagsSender(req *http.Request) (future P var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client P2sVpnGatewaysClient) (pvg P2SVpnGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.P2sVpnGatewaysUpdateTagsFuture") - return - } - sender 
:= autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - pvg.Response.Response, err = future.GetResult(sender) - if pvg.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && pvg.Response.Response.StatusCode != http.StatusNoContent { - pvg, err = client.UpdateTagsResponder(pvg.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnGatewaysUpdateTagsFuture", "Result", pvg.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/p2svpnserverconfigurations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/p2svpnserverconfigurations.go index 2b4cbac8958..db73424e594 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/p2svpnserverconfigurations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/p2svpnserverconfigurations.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -111,30 +100,7 @@ func (client P2sVpnServerConfigurationsClient) CreateOrUpdateSender(req *http.Re var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client P2sVpnServerConfigurationsClient) (pvsc P2SVpnServerConfiguration, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnServerConfigurationsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.P2sVpnServerConfigurationsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - pvsc.Response.Response, err = future.GetResult(sender) - if pvsc.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnServerConfigurationsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && pvsc.Response.Response.StatusCode != http.StatusNoContent { - pvsc, err = client.CreateOrUpdateResponder(pvsc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnServerConfigurationsCreateOrUpdateFuture", "Result", pvsc.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -214,20 +180,7 @@ func (client P2sVpnServerConfigurationsClient) DeleteSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client P2sVpnServerConfigurationsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.P2sVpnServerConfigurationsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.P2sVpnServerConfigurationsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/packetcaptures.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/packetcaptures.go index 65dda76e428..80e3841e41e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/packetcaptures.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/packetcaptures.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
// // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -118,30 +107,7 @@ func (client PacketCapturesClient) CreateSender(req *http.Request) (future Packe var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client PacketCapturesClient) (pcr PacketCaptureResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PacketCapturesCreateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.PacketCapturesCreateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - pcr.Response.Response, err = future.GetResult(sender) - if pcr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.PacketCapturesCreateFuture", "Result", nil, "received nil response and error") - } - if err == nil && pcr.Response.Response.StatusCode != http.StatusNoContent { - pcr, err = client.CreateResponder(pcr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PacketCapturesCreateFuture", "Result", pcr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -221,20 +187,7 @@ func (client PacketCapturesClient) DeleteSender(req *http.Request) (future Packe var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client PacketCapturesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PacketCapturesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.PacketCapturesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -391,30 +344,7 @@ func (client PacketCapturesClient) GetStatusSender(req *http.Request) (future Pa var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client PacketCapturesClient) (pcqsr PacketCaptureQueryStatusResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PacketCapturesGetStatusFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.PacketCapturesGetStatusFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - pcqsr.Response.Response, err = future.GetResult(sender) - if pcqsr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.PacketCapturesGetStatusFuture", "Result", nil, "received nil response and error") - } - if err == nil && pcqsr.Response.Response.StatusCode != http.StatusNoContent { - pcqsr, err = client.GetStatusResponder(pcqsr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PacketCapturesGetStatusFuture", "Result", 
pcqsr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -570,20 +500,7 @@ func (client PacketCapturesClient) StopSender(req *http.Request) (future PacketC var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client PacketCapturesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PacketCapturesStopFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.PacketCapturesStopFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/peerexpressroutecircuitconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/peerexpressroutecircuitconnections.go index 29aef7660b4..8f672ea9003 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/peerexpressroutecircuitconnections.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/peerexpressroutecircuitconnections.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/privateendpoints.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/privateendpoints.go index 6170372965c..c2be671831b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/privateendpoints.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/privateendpoints.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -107,30 +96,7 @@ func (client PrivateEndpointsClient) CreateOrUpdateSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client PrivateEndpointsClient) (peVar PrivateEndpoint, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PrivateEndpointsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.PrivateEndpointsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - peVar.Response.Response, err = future.GetResult(sender) - if peVar.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.PrivateEndpointsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && peVar.Response.Response.StatusCode != http.StatusNoContent { - peVar, err = client.CreateOrUpdateResponder(peVar.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PrivateEndpointsCreateOrUpdateFuture", "Result", peVar.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -208,20 +174,7 @@ func (client PrivateEndpointsClient) DeleteSender(req *http.Request) (future Pri var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client PrivateEndpointsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PrivateEndpointsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.PrivateEndpointsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/privatelinkservices.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/privatelinkservices.go index 9c0bcdbb958..45e052d2dfb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/privatelinkservices.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/privatelinkservices.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. 
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
 //
 // Code generated by Microsoft (R) AutoRest Code Generator.
 // Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -105,30 +94,7 @@ func (client PrivateLinkServicesClient) CheckPrivateLinkServiceVisibilitySender(
 	var azf azure.Future
 	azf, err = azure.NewFutureFromResponse(resp)
 	future.FutureAPI = &azf
-	future.Result = func(client PrivateLinkServicesClient) (plsv PrivateLinkServiceVisibility, err error) {
-		var done bool
-		done, err = future.DoneWithContext(context.Background(), client)
-		if err != nil {
-			err = autorest.NewErrorWithError(err, "network.PrivateLinkServicesCheckPrivateLinkServiceVisibilityFuture", "Result", future.Response(), "Polling failure")
-			return
-		}
-		if !done {
-			err = azure.NewAsyncOpIncompleteError("network.PrivateLinkServicesCheckPrivateLinkServiceVisibilityFuture")
-			return
-		}
-		sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-		plsv.Response.Response, err = future.GetResult(sender)
-		if plsv.Response.Response == nil && err == nil {
-			err = autorest.NewErrorWithError(err, "network.PrivateLinkServicesCheckPrivateLinkServiceVisibilityFuture", "Result", nil, "received nil response and error")
-		}
-		if err == nil && plsv.Response.Response.StatusCode != http.StatusNoContent {
-			plsv, err = client.CheckPrivateLinkServiceVisibilityResponder(plsv.Response.Response)
-			if err != nil {
-				err = autorest.NewErrorWithError(err, "network.PrivateLinkServicesCheckPrivateLinkServiceVisibilityFuture", "Result", plsv.Response.Response, "Failure responding to request")
-			}
-		}
-		return
-	}
+	future.Result = future.result
 	return
 }
 
@@ -210,30 +176,7 @@ func (client PrivateLinkServicesClient) CheckPrivateLinkServiceVisibilityByResou
 	var azf azure.Future
 	azf, err = azure.NewFutureFromResponse(resp)
 	future.FutureAPI = &azf
-	future.Result = func(client PrivateLinkServicesClient) (plsv PrivateLinkServiceVisibility, err error) {
-		var done bool
-		done, err = future.DoneWithContext(context.Background(), client)
-		if err != nil {
-			err = autorest.NewErrorWithError(err, "network.PrivateLinkServicesCheckPrivateLinkServiceVisibilityByResourceGroupFuture", "Result", future.Response(), "Polling failure")
-			return
-		}
-		if !done {
-			err = azure.NewAsyncOpIncompleteError("network.PrivateLinkServicesCheckPrivateLinkServiceVisibilityByResourceGroupFuture")
-			return
-		}
-		sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-		plsv.Response.Response, err = future.GetResult(sender)
-		if plsv.Response.Response == nil && err == nil {
-			err = autorest.NewErrorWithError(err, "network.PrivateLinkServicesCheckPrivateLinkServiceVisibilityByResourceGroupFuture", "Result", nil, "received nil response and error")
-		}
-		if err == nil && plsv.Response.Response.StatusCode != http.StatusNoContent {
-			plsv, err = client.CheckPrivateLinkServiceVisibilityByResourceGroupResponder(plsv.Response.Response)
-			if err != nil {
-				err = autorest.NewErrorWithError(err, "network.PrivateLinkServicesCheckPrivateLinkServiceVisibilityByResourceGroupFuture", "Result", plsv.Response.Response, "Failure responding to request")
-			}
-		}
-		return
-	}
+	future.Result = future.result
 	return
 }
 
@@ -314,30 +257,7 @@ func (client PrivateLinkServicesClient) CreateOrUpdateSender(req *http.Request)
 	var azf azure.Future
 	azf, err = azure.NewFutureFromResponse(resp)
 	future.FutureAPI = &azf
-	future.Result = func(client PrivateLinkServicesClient) (pls PrivateLinkService, err error) {
-		var done bool
-		done, err = future.DoneWithContext(context.Background(), client)
-		if err != nil {
-			err = autorest.NewErrorWithError(err, "network.PrivateLinkServicesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
-			return
-		}
-		if !done {
-			err = azure.NewAsyncOpIncompleteError("network.PrivateLinkServicesCreateOrUpdateFuture")
-			return
-		}
-		sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-		pls.Response.Response, err = future.GetResult(sender)
-		if pls.Response.Response == nil && err == nil {
-			err = autorest.NewErrorWithError(err, "network.PrivateLinkServicesCreateOrUpdateFuture", "Result", nil, "received nil response and error")
-		}
-		if err == nil && pls.Response.Response.StatusCode != http.StatusNoContent {
-			pls, err = client.CreateOrUpdateResponder(pls.Response.Response)
-			if err != nil {
-				err = autorest.NewErrorWithError(err, "network.PrivateLinkServicesCreateOrUpdateFuture", "Result", pls.Response.Response, "Failure responding to request")
-			}
-		}
-		return
-	}
+	future.Result = future.result
 	return
 }
 
@@ -415,20 +335,7 @@ func (client PrivateLinkServicesClient) DeleteSender(req *http.Request) (future
 	var azf azure.Future
 	azf, err = azure.NewFutureFromResponse(resp)
 	future.FutureAPI = &azf
-	future.Result = func(client PrivateLinkServicesClient) (ar autorest.Response, err error) {
-		var done bool
-		done, err = future.DoneWithContext(context.Background(), client)
-		if err != nil {
-			err = autorest.NewErrorWithError(err, "network.PrivateLinkServicesDeleteFuture", "Result", future.Response(), "Polling failure")
-			return
-		}
-		if !done {
-			err = azure.NewAsyncOpIncompleteError("network.PrivateLinkServicesDeleteFuture")
-			return
-		}
-		ar.Response = future.Response()
-		return
-	}
+	future.Result = future.result
 	return
 }
 
@@ -507,20 +414,7 @@ func (client PrivateLinkServicesClient) DeletePrivateEndpointConnectionSender(re
 	var azf azure.Future
 	azf, err = azure.NewFutureFromResponse(resp)
 	future.FutureAPI = &azf
-	future.Result = func(client PrivateLinkServicesClient) (ar autorest.Response, err error) {
-		var done bool
-		done, err = future.DoneWithContext(context.Background(), client)
-		if err != nil {
-			err = autorest.NewErrorWithError(err, "network.PrivateLinkServicesDeletePrivateEndpointConnectionFuture", "Result", future.Response(), "Polling failure")
-			return
-		}
-		if !done {
-			err = azure.NewAsyncOpIncompleteError("network.PrivateLinkServicesDeletePrivateEndpointConnectionFuture")
-			return
-		}
-		ar.Response = future.Response()
-		return
-	}
+	future.Result = future.result
 	return
 }
 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/profiles.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/profiles.go
index f02b3a7df12..52987cc89b9 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/profiles.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/profiles.go
@@ -1,18 +1,7 @@
 package network
 
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
 //
 // Code generated by Microsoft (R) AutoRest Code Generator.
 // Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -182,20 +171,7 @@ func (client ProfilesClient) DeleteSender(req *http.Request) (future ProfilesDel
 	var azf azure.Future
 	azf, err = azure.NewFutureFromResponse(resp)
 	future.FutureAPI = &azf
-	future.Result = func(client ProfilesClient) (ar autorest.Response, err error) {
-		var done bool
-		done, err = future.DoneWithContext(context.Background(), client)
-		if err != nil {
-			err = autorest.NewErrorWithError(err, "network.ProfilesDeleteFuture", "Result", future.Response(), "Polling failure")
-			return
-		}
-		if !done {
-			err = azure.NewAsyncOpIncompleteError("network.ProfilesDeleteFuture")
-			return
-		}
-		ar.Response = future.Response()
-		return
-	}
+	future.Result = future.result
 	return
 }
 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/publicipaddresses.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/publicipaddresses.go
index 240ce8de344..ca881ecae05 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/publicipaddresses.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/publicipaddresses.go
@@ -1,18 +1,7 @@
 package network
 
-// Copyright (c) Microsoft and contributors. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
 //
 // Code generated by Microsoft (R) AutoRest Code Generator.
 // Changes may cause incorrect behavior and will be lost if the code is regenerated.
@@ -119,30 +108,7 @@ func (client PublicIPAddressesClient) CreateOrUpdateSender(req *http.Request) (f
 	var azf azure.Future
 	azf, err = azure.NewFutureFromResponse(resp)
 	future.FutureAPI = &azf
-	future.Result = func(client PublicIPAddressesClient) (pia PublicIPAddress, err error) {
-		var done bool
-		done, err = future.DoneWithContext(context.Background(), client)
-		if err != nil {
-			err = autorest.NewErrorWithError(err, "network.PublicIPAddressesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
-			return
-		}
-		if !done {
-			err = azure.NewAsyncOpIncompleteError("network.PublicIPAddressesCreateOrUpdateFuture")
-			return
-		}
-		sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-		pia.Response.Response, err = future.GetResult(sender)
-		if pia.Response.Response == nil && err == nil {
-			err = autorest.NewErrorWithError(err, "network.PublicIPAddressesCreateOrUpdateFuture", "Result", nil, "received nil response and error")
-		}
-		if err == nil && pia.Response.Response.StatusCode != http.StatusNoContent {
-			pia, err = client.CreateOrUpdateResponder(pia.Response.Response)
-			if err != nil {
-				err = autorest.NewErrorWithError(err, "network.PublicIPAddressesCreateOrUpdateFuture", "Result", pia.Response.Response, "Failure responding to request")
-			}
-		}
-		return
-	}
+	future.Result = future.result
 	return
 }
 
@@ -220,20 +186,7 @@ func (client PublicIPAddressesClient) DeleteSender(req *http.Request) (future Pu
 	var azf azure.Future
 	azf, err = azure.NewFutureFromResponse(resp)
 	future.FutureAPI = &azf
-	future.Result = func(client PublicIPAddressesClient) (ar autorest.Response, err error) {
-		var done bool
-		done, err = future.DoneWithContext(context.Background(), client)
-		if err != nil {
-			err = autorest.NewErrorWithError(err, "network.PublicIPAddressesDeleteFuture", "Result", future.Response(), "Polling failure")
-			return
-		}
-		if !done {
-			err = azure.NewAsyncOpIncompleteError("network.PublicIPAddressesDeleteFuture")
-			return
-		}
-		ar.Response = future.Response()
-		return
-	}
+	future.Result = future.result
 	return
 }
 
@@ -954,30 +907,7 @@ func (client PublicIPAddressesClient) UpdateTagsSender(req *http.Request) (futur
 	var azf azure.Future
 	azf, err = azure.NewFutureFromResponse(resp)
 	future.FutureAPI = &azf
-	future.Result = func(client PublicIPAddressesClient) (pia PublicIPAddress, err error) {
-		var done bool
-		done, err = future.DoneWithContext(context.Background(), client)
-		if err != nil {
-			err = autorest.NewErrorWithError(err, "network.PublicIPAddressesUpdateTagsFuture", "Result", future.Response(), "Polling failure")
-			return
-		}
-		if !done {
-			err = azure.NewAsyncOpIncompleteError("network.PublicIPAddressesUpdateTagsFuture")
-			return
-		}
-		sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-		pia.Response.Response, err = future.GetResult(sender)
-		if pia.Response.Response == nil && err == nil {
-			err = autorest.NewErrorWithError(err, "network.PublicIPAddressesUpdateTagsFuture", "Result", nil, "received nil response and error")
-		}
-		if err == nil && pia.Response.Response.StatusCode != http.StatusNoContent {
-			pia, err = client.UpdateTagsResponder(pia.Response.Response)
-			if err != nil {
-				err = autorest.NewErrorWithError(err, "network.PublicIPAddressesUpdateTagsFuture", "Result", pia.Response.Response, "Failure responding to request")
-			}
-		}
-		return
-	}
+	future.Result = future.result
return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/publicipprefixes.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/publicipprefixes.go index f0ad809e16d..1b4aa71fec2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/publicipprefixes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/publicipprefixes.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -107,30 +96,7 @@ func (client PublicIPPrefixesClient) CreateOrUpdateSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client PublicIPPrefixesClient) (pip PublicIPPrefix, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.PublicIPPrefixesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - pip.Response.Response, err = future.GetResult(sender) - if pip.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && pip.Response.Response.StatusCode != http.StatusNoContent { - pip, err = client.CreateOrUpdateResponder(pip.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesCreateOrUpdateFuture", "Result", pip.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -208,20 +174,7 @@ func (client PublicIPPrefixesClient) DeleteSender(req *http.Request) (future Pub var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client PublicIPPrefixesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.PublicIPPrefixesDeleteFuture") - return - } - ar.Response = future.Response() - return - } 
+ future.Result = future.result return } @@ -610,30 +563,7 @@ func (client PublicIPPrefixesClient) UpdateTagsSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client PublicIPPrefixesClient) (pip PublicIPPrefix, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.PublicIPPrefixesUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - pip.Response.Response, err = future.GetResult(sender) - if pip.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && pip.Response.Response.StatusCode != http.StatusNoContent { - pip, err = client.UpdateTagsResponder(pip.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PublicIPPrefixesUpdateTagsFuture", "Result", pip.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/resourcenavigationlinks.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/resourcenavigationlinks.go index 15128430749..f39c77705dc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/resourcenavigationlinks.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/resourcenavigationlinks.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/routefilterrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/routefilterrules.go index dac09574b59..585db5a40ef 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/routefilterrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/routefilterrules.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -120,30 +109,7 @@ func (client RouteFilterRulesClient) CreateOrUpdateSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RouteFilterRulesClient) (rfr RouteFilterRule, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteFilterRulesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.RouteFilterRulesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - rfr.Response.Response, err = future.GetResult(sender) - if rfr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.RouteFilterRulesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && rfr.Response.Response.StatusCode != http.StatusNoContent { - rfr, err = client.CreateOrUpdateResponder(rfr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteFilterRulesCreateOrUpdateFuture", "Result", rfr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -223,20 +189,7 @@ func (client RouteFilterRulesClient) DeleteSender(req *http.Request) (future Rou var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RouteFilterRulesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteFilterRulesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.RouteFilterRulesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -516,30 +469,7 @@ func (client RouteFilterRulesClient) UpdateSender(req *http.Request) (future Rou var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RouteFilterRulesClient) (rfr RouteFilterRule, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteFilterRulesUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - 
err = azure.NewAsyncOpIncompleteError("network.RouteFilterRulesUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - rfr.Response.Response, err = future.GetResult(sender) - if rfr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.RouteFilterRulesUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && rfr.Response.Response.StatusCode != http.StatusNoContent { - rfr, err = client.UpdateResponder(rfr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteFilterRulesUpdateFuture", "Result", rfr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/routefilters.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/routefilters.go index 3b01fa0edef..0b59f7b642f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/routefilters.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/routefilters.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -107,30 +96,7 @@ func (client RouteFiltersClient) CreateOrUpdateSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RouteFiltersClient) (rf RouteFilter, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteFiltersCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.RouteFiltersCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - rf.Response.Response, err = future.GetResult(sender) - if rf.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.RouteFiltersCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && rf.Response.Response.StatusCode != http.StatusNoContent { - rf, err = client.CreateOrUpdateResponder(rf.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteFiltersCreateOrUpdateFuture", "Result", rf.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -208,20 +174,7 @@ func (client RouteFiltersClient) DeleteSender(req *http.Request) (future RouteFi var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RouteFiltersClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteFiltersDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.RouteFiltersDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -613,30 +566,7 @@ func (client RouteFiltersClient) UpdateSender(req *http.Request) (future RouteFi var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RouteFiltersClient) (rf RouteFilter, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteFiltersUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.RouteFiltersUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - rf.Response.Response, err = future.GetResult(sender) - if rf.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.RouteFiltersUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && rf.Response.Response.StatusCode != http.StatusNoContent { - rf, err = client.UpdateResponder(rf.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteFiltersUpdateFuture", "Result", rf.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/routes.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/routes.go index a31f071ae56..879fef37360 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/routes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/routes.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -108,30 +97,7 @@ func (client RoutesClient) CreateOrUpdateSender(req *http.Request) (future Route var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RoutesClient) (r Route, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RoutesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.RoutesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - r.Response.Response, err = future.GetResult(sender) - if r.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.RoutesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && r.Response.Response.StatusCode != http.StatusNoContent { - r, err = client.CreateOrUpdateResponder(r.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RoutesCreateOrUpdateFuture", "Result", r.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -211,20 +177,7 @@ func (client RoutesClient) DeleteSender(req *http.Request) (future RoutesDeleteF var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RoutesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RoutesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.RoutesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/routetables.go 
b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/routetables.go index 2df71e742be..af9d0ed6ee1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/routetables.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/routetables.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -106,30 +95,7 @@ func (client RouteTablesClient) CreateOrUpdateSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RouteTablesClient) (rt RouteTable, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteTablesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.RouteTablesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - rt.Response.Response, err = future.GetResult(sender) - if rt.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.RouteTablesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && rt.Response.Response.StatusCode != http.StatusNoContent { - rt, err = client.CreateOrUpdateResponder(rt.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteTablesCreateOrUpdateFuture", "Result", rt.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -207,20 +173,7 @@ func (client RouteTablesClient) DeleteSender(req *http.Request) (future RouteTab var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RouteTablesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteTablesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.RouteTablesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -609,30 +562,7 @@ func (client RouteTablesClient) UpdateTagsSender(req *http.Request) (future Rout var azf azure.Future azf, err = 
azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RouteTablesClient) (rt RouteTable, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteTablesUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.RouteTablesUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - rt.Response.Response, err = future.GetResult(sender) - if rt.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.RouteTablesUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && rt.Response.Response.StatusCode != http.StatusNoContent { - rt, err = client.UpdateTagsResponder(rt.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteTablesUpdateTagsFuture", "Result", rt.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/securitygroups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/securitygroups.go index 947191c7818..f70e7d214dc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/securitygroups.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/securitygroups.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -106,30 +95,7 @@ func (client SecurityGroupsClient) CreateOrUpdateSender(req *http.Request) (futu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SecurityGroupsClient) (sg SecurityGroup, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SecurityGroupsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.SecurityGroupsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - sg.Response.Response, err = future.GetResult(sender) - if sg.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.SecurityGroupsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && sg.Response.Response.StatusCode != http.StatusNoContent { - sg, err = client.CreateOrUpdateResponder(sg.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SecurityGroupsCreateOrUpdateFuture", "Result", sg.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -207,20 +173,7 @@ func (client SecurityGroupsClient) DeleteSender(req *http.Request) (future Secur var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SecurityGroupsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SecurityGroupsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.SecurityGroupsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -609,30 +562,7 @@ func (client SecurityGroupsClient) UpdateTagsSender(req *http.Request) (future S var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SecurityGroupsClient) (sg SecurityGroup, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SecurityGroupsUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.SecurityGroupsUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - sg.Response.Response, err = future.GetResult(sender) - if sg.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.SecurityGroupsUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && sg.Response.Response.StatusCode != http.StatusNoContent { - sg, err = client.UpdateTagsResponder(sg.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SecurityGroupsUpdateTagsFuture", "Result", sg.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/securityrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/securityrules.go index 0a5ad2fbe1d..be36d27aa5d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/securityrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/securityrules.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -108,30 +97,7 @@ func (client SecurityRulesClient) CreateOrUpdateSender(req *http.Request) (futur var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SecurityRulesClient) (sr SecurityRule, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SecurityRulesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.SecurityRulesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - sr.Response.Response, err = future.GetResult(sender) - if sr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.SecurityRulesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && sr.Response.Response.StatusCode != http.StatusNoContent { - sr, err = client.CreateOrUpdateResponder(sr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SecurityRulesCreateOrUpdateFuture", "Result", sr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -211,20 +177,7 @@ func (client SecurityRulesClient) DeleteSender(req *http.Request) (future Securi var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SecurityRulesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SecurityRulesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.SecurityRulesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/serviceassociationlinks.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/serviceassociationlinks.go index 609694e511e..17feb6c9815 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/serviceassociationlinks.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/serviceassociationlinks.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/serviceendpointpolicies.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/serviceendpointpolicies.go index b16ac338cbb..943c0b6d245 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/serviceendpointpolicies.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/serviceendpointpolicies.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -107,30 +96,7 @@ func (client ServiceEndpointPoliciesClient) CreateOrUpdateSender(req *http.Reque var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ServiceEndpointPoliciesClient) (sep ServiceEndpointPolicy, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ServiceEndpointPoliciesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - sep.Response.Response, err = future.GetResult(sender) - if sep.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && sep.Response.Response.StatusCode != http.StatusNoContent { - sep, err = client.CreateOrUpdateResponder(sep.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesCreateOrUpdateFuture", "Result", sep.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -208,20 +174,7 @@ func (client ServiceEndpointPoliciesClient) DeleteSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ServiceEndpointPoliciesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ServiceEndpointPoliciesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -610,30 +563,7 @@ func (client ServiceEndpointPoliciesClient) UpdateSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ServiceEndpointPoliciesClient) (sep ServiceEndpointPolicy, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ServiceEndpointPoliciesUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - sep.Response.Response, err = future.GetResult(sender) - if sep.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && sep.Response.Response.StatusCode != http.StatusNoContent { - sep, err = client.UpdateResponder(sep.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ServiceEndpointPoliciesUpdateFuture", "Result", sep.Response.Response, "Failure responding 
to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/serviceendpointpolicydefinitions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/serviceendpointpolicydefinitions.go index de52258ea4d..a0d2cb7b669 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/serviceendpointpolicydefinitions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/serviceendpointpolicydefinitions.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -110,30 +99,7 @@ func (client ServiceEndpointPolicyDefinitionsClient) CreateOrUpdateSender(req *h var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ServiceEndpointPolicyDefinitionsClient) (sepd ServiceEndpointPolicyDefinition, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - sepd.Response.Response, err = future.GetResult(sender) - if sepd.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && sepd.Response.Response.StatusCode != http.StatusNoContent { - sepd, err = client.CreateOrUpdateResponder(sepd.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ServiceEndpointPolicyDefinitionsCreateOrUpdateFuture", "Result", sepd.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -213,20 +179,7 @@ func (client ServiceEndpointPolicyDefinitionsClient) DeleteSender(req *http.Requ var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ServiceEndpointPolicyDefinitionsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, 
"network.ServiceEndpointPolicyDefinitionsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.ServiceEndpointPolicyDefinitionsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/servicetags.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/servicetags.go index 9edda2ea682..1ad74fee0d9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/servicetags.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/servicetags.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/subnets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/subnets.go index 26ad76a4cd8..f1f282092a8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/subnets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/subnets.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -108,30 +97,7 @@ func (client SubnetsClient) CreateOrUpdateSender(req *http.Request) (future Subn var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SubnetsClient) (s Subnet, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SubnetsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.SubnetsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - s.Response.Response, err = future.GetResult(sender) - if s.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.SubnetsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && s.Response.Response.StatusCode != http.StatusNoContent { - s, err = client.CreateOrUpdateResponder(s.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SubnetsCreateOrUpdateFuture", "Result", s.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -211,20 +177,7 @@ func (client SubnetsClient) DeleteSender(req *http.Request) (future SubnetsDelet var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SubnetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SubnetsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.SubnetsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -507,20 +460,7 @@ func (client SubnetsClient) PrepareNetworkPoliciesSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SubnetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SubnetsPrepareNetworkPoliciesFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.SubnetsPrepareNetworkPoliciesFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -603,20 +543,7 @@ func (client SubnetsClient) UnprepareNetworkPoliciesSender(req *http.Request) (f var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client SubnetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SubnetsUnprepareNetworkPoliciesFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.SubnetsUnprepareNetworkPoliciesFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/usages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/usages.go index de53395739a..72259ca6165 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/usages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/usages.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/version.go index 262ff7da350..d25acde39d6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/version.go @@ -2,19 +2,8 @@ package network import "github.com/Azure/azure-sdk-for-go/version" -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualhubs.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualhubs.go index 777fb924c1b..a8cad848371 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualhubs.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualhubs.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -107,30 +96,7 @@ func (client VirtualHubsClient) CreateOrUpdateSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualHubsClient) (vh VirtualHub, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualHubsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualHubsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vh.Response.Response, err = future.GetResult(sender) - if vh.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualHubsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vh.Response.Response.StatusCode != http.StatusNoContent { - vh, err = client.CreateOrUpdateResponder(vh.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualHubsCreateOrUpdateFuture", "Result", vh.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -208,20 +174,7 @@ func (client VirtualHubsClient) DeleteSender(req *http.Request) (future VirtualH var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualHubsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualHubsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualHubsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -606,30 +559,7 @@ func (client VirtualHubsClient) UpdateTagsSender(req *http.Request) (future Virt var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualHubsClient) (vh VirtualHub, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualHubsUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualHubsUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, 
client.RetryDuration, autorest.StatusCodesForRetry...)) - vh.Response.Response, err = future.GetResult(sender) - if vh.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualHubsUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && vh.Response.Response.StatusCode != http.StatusNoContent { - vh, err = client.UpdateTagsResponder(vh.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualHubsUpdateTagsFuture", "Result", vh.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualnetworkgatewayconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualnetworkgatewayconnections.go index 154d5d20434..a1ff464d69a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualnetworkgatewayconnections.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualnetworkgatewayconnections.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
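The hunks above repeat a single mechanical change across these regenerated network clients: each *Sender no longer builds its polling closure inline but simply assigns future.Result = future.result, where result is an unexported method the generator now emits on the corresponding future type (presumably alongside the future definitions in this package's models file, which is not part of this excerpt). As a rough sketch of what that generated method looks like, reconstructed from the SubnetsCreateOrUpdateFuture closure removed earlier in this diff (the exact generated form may differ slightly); delete-style futures follow the same shape minus the GetResult/Responder step:

package network

// Assumed surrounding context for this sketch; the real method is generated into models.go.
import (
	"context"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

// result mirrors the removed inline closure: poll for completion, then fetch and decode the final Subnet.
func (future *SubnetsCreateOrUpdateFuture) result(client SubnetsClient) (s Subnet, err error) {
	var done bool
	done, err = future.DoneWithContext(context.Background(), client)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.SubnetsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
		return
	}
	if !done {
		err = azure.NewAsyncOpIncompleteError("network.SubnetsCreateOrUpdateFuture")
		return
	}
	// Retrieve the terminal response using the client's retry policy, then run the usual responder.
	sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
	s.Response.Response, err = future.GetResult(sender)
	if s.Response.Response == nil && err == nil {
		err = autorest.NewErrorWithError(err, "network.SubnetsCreateOrUpdateFuture", "Result", nil, "received nil response and error")
	}
	if err == nil && s.Response.Response.StatusCode != http.StatusNoContent {
		s, err = client.CreateOrUpdateResponder(s.Response.Response)
		if err != nil {
			err = autorest.NewErrorWithError(err, "network.SubnetsCreateOrUpdateFuture", "Result", s.Response.Response, "Failure responding to request")
		}
	}
	return
}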
@@ -121,30 +110,7 @@ func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdateSender(req *h var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewayConnectionsClient) (vngc VirtualNetworkGatewayConnection, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vngc.Response.Response, err = future.GetResult(sender) - if vngc.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vngc.Response.Response.StatusCode != http.StatusNoContent { - vngc, err = client.CreateOrUpdateResponder(vngc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsCreateOrUpdateFuture", "Result", vngc.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -222,20 +188,7 @@ func (client VirtualNetworkGatewayConnectionsClient) DeleteSender(req *http.Requ var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewayConnectionsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -597,30 +550,7 @@ func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeySender(req *h var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewayConnectionsClient) (crsk ConnectionResetSharedKey, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsResetSharedKeyFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsResetSharedKeyFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - crsk.Response.Response, err = future.GetResult(sender) - if crsk.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsResetSharedKeyFuture", "Result", nil, "received nil response and error") - } - if err == nil && crsk.Response.Response.StatusCode != http.StatusNoContent { - crsk, err = 
client.ResetSharedKeyResponder(crsk.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsResetSharedKeyFuture", "Result", crsk.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -710,30 +640,7 @@ func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeySender(req *htt var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewayConnectionsClient) (csk ConnectionSharedKey, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsSetSharedKeyFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsSetSharedKeyFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - csk.Response.Response, err = future.GetResult(sender) - if csk.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsSetSharedKeyFuture", "Result", nil, "received nil response and error") - } - if err == nil && csk.Response.Response.StatusCode != http.StatusNoContent { - csk, err = client.SetSharedKeyResponder(csk.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsSetSharedKeyFuture", "Result", csk.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -818,30 +725,7 @@ func (client VirtualNetworkGatewayConnectionsClient) StartPacketCaptureSender(re var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewayConnectionsClient) (s String, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsStartPacketCaptureFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsStartPacketCaptureFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - s.Response.Response, err = future.GetResult(sender) - if s.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsStartPacketCaptureFuture", "Result", nil, "received nil response and error") - } - if err == nil && s.Response.Response.StatusCode != http.StatusNoContent { - s, err = client.StartPacketCaptureResponder(s.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsStartPacketCaptureFuture", "Result", s.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -923,30 +807,7 @@ func (client VirtualNetworkGatewayConnectionsClient) StopPacketCaptureSender(req var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client 
VirtualNetworkGatewayConnectionsClient) (s String, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsStopPacketCaptureFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsStopPacketCaptureFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - s.Response.Response, err = future.GetResult(sender) - if s.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsStopPacketCaptureFuture", "Result", nil, "received nil response and error") - } - if err == nil && s.Response.Response.StatusCode != http.StatusNoContent { - s, err = client.StopPacketCaptureResponder(s.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsStopPacketCaptureFuture", "Result", s.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1027,30 +888,7 @@ func (client VirtualNetworkGatewayConnectionsClient) UpdateTagsSender(req *http. var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewayConnectionsClient) (vngc VirtualNetworkGatewayConnection, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vngc.Response.Response, err = future.GetResult(sender) - if vngc.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && vngc.Response.Response.StatusCode != http.StatusNoContent { - vngc, err = client.UpdateTagsResponder(vngc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsUpdateTagsFuture", "Result", vngc.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualnetworkgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualnetworkgateways.go index f882d2f692b..b5771fc9506 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualnetworkgateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualnetworkgateways.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -114,30 +103,7 @@ func (client VirtualNetworkGatewaysClient) CreateOrUpdateSender(req *http.Reques var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (vng VirtualNetworkGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vng.Response.Response, err = future.GetResult(sender) - if vng.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vng.Response.Response.StatusCode != http.StatusNoContent { - vng, err = client.CreateOrUpdateResponder(vng.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysCreateOrUpdateFuture", "Result", vng.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -215,20 +181,7 @@ func (client VirtualNetworkGatewaysClient) DeleteSender(req *http.Request) (futu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -309,30 +262,7 @@ func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackageSender(req *h var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (s String, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGeneratevpnclientpackageFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = 
azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGeneratevpnclientpackageFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - s.Response.Response, err = future.GetResult(sender) - if s.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGeneratevpnclientpackageFuture", "Result", nil, "received nil response and error") - } - if err == nil && s.Response.Response.StatusCode != http.StatusNoContent { - s, err = client.GeneratevpnclientpackageResponder(s.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGeneratevpnclientpackageFuture", "Result", s.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -414,30 +344,7 @@ func (client VirtualNetworkGatewaysClient) GenerateVpnProfileSender(req *http.Re var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (s String, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGenerateVpnProfileFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGenerateVpnProfileFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - s.Response.Response, err = future.GetResult(sender) - if s.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGenerateVpnProfileFuture", "Result", nil, "received nil response and error") - } - if err == nil && s.Response.Response.StatusCode != http.StatusNoContent { - s, err = client.GenerateVpnProfileResponder(s.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGenerateVpnProfileFuture", "Result", s.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -594,30 +501,7 @@ func (client VirtualNetworkGatewaysClient) GetAdvertisedRoutesSender(req *http.R var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (grlr GatewayRouteListResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetAdvertisedRoutesFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetAdvertisedRoutesFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - grlr.Response.Response, err = future.GetResult(sender) - if grlr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetAdvertisedRoutesFuture", "Result", nil, "received nil response and error") - } - if err == nil && grlr.Response.Response.StatusCode != http.StatusNoContent { - grlr, 
err = client.GetAdvertisedRoutesResponder(grlr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetAdvertisedRoutesFuture", "Result", grlr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -699,30 +583,7 @@ func (client VirtualNetworkGatewaysClient) GetBgpPeerStatusSender(req *http.Requ var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (bpslr BgpPeerStatusListResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetBgpPeerStatusFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetBgpPeerStatusFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - bpslr.Response.Response, err = future.GetResult(sender) - if bpslr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetBgpPeerStatusFuture", "Result", nil, "received nil response and error") - } - if err == nil && bpslr.Response.Response.StatusCode != http.StatusNoContent { - bpslr, err = client.GetBgpPeerStatusResponder(bpslr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetBgpPeerStatusFuture", "Result", bpslr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -801,30 +662,7 @@ func (client VirtualNetworkGatewaysClient) GetLearnedRoutesSender(req *http.Requ var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (grlr GatewayRouteListResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetLearnedRoutesFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetLearnedRoutesFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - grlr.Response.Response, err = future.GetResult(sender) - if grlr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetLearnedRoutesFuture", "Result", nil, "received nil response and error") - } - if err == nil && grlr.Response.Response.StatusCode != http.StatusNoContent { - grlr, err = client.GetLearnedRoutesResponder(grlr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetLearnedRoutesFuture", "Result", grlr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -903,30 +741,7 @@ func (client VirtualNetworkGatewaysClient) GetVpnclientConnectionHealthSender(re var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (vcchdlr 
VpnClientConnectionHealthDetailListResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetVpnclientConnectionHealthFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetVpnclientConnectionHealthFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vcchdlr.Response.Response, err = future.GetResult(sender) - if vcchdlr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetVpnclientConnectionHealthFuture", "Result", nil, "received nil response and error") - } - if err == nil && vcchdlr.Response.Response.StatusCode != http.StatusNoContent { - vcchdlr, err = client.GetVpnclientConnectionHealthResponder(vcchdlr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetVpnclientConnectionHealthFuture", "Result", vcchdlr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1006,30 +821,7 @@ func (client VirtualNetworkGatewaysClient) GetVpnclientIpsecParametersSender(req var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (vcipp VpnClientIPsecParameters, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vcipp.Response.Response, err = future.GetResult(sender) - if vcipp.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture", "Result", nil, "received nil response and error") - } - if err == nil && vcipp.Response.Response.StatusCode != http.StatusNoContent { - vcipp, err = client.GetVpnclientIpsecParametersResponder(vcipp.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetVpnclientIpsecParametersFuture", "Result", vcipp.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1108,30 +900,7 @@ func (client VirtualNetworkGatewaysClient) GetVpnProfilePackageURLSender(req *ht var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (s String, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetVpnProfilePackageURLFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGetVpnProfilePackageURLFuture") - return - } - sender := 
autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - s.Response.Response, err = future.GetResult(sender) - if s.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetVpnProfilePackageURLFuture", "Result", nil, "received nil response and error") - } - if err == nil && s.Response.Response.StatusCode != http.StatusNoContent { - s, err = client.GetVpnProfilePackageURLResponder(s.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGetVpnProfilePackageURLFuture", "Result", s.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1448,30 +1217,7 @@ func (client VirtualNetworkGatewaysClient) ResetSender(req *http.Request) (futur var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (vng VirtualNetworkGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysResetFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysResetFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vng.Response.Response, err = future.GetResult(sender) - if vng.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysResetFuture", "Result", nil, "received nil response and error") - } - if err == nil && vng.Response.Response.StatusCode != http.StatusNoContent { - vng, err = client.ResetResponder(vng.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysResetFuture", "Result", vng.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1550,20 +1296,7 @@ func (client VirtualNetworkGatewaysClient) ResetVpnClientSharedKeySender(req *ht var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysResetVpnClientSharedKeyFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysResetVpnClientSharedKeyFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1652,30 +1385,7 @@ func (client VirtualNetworkGatewaysClient) SetVpnclientIpsecParametersSender(req var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (vcipp VpnClientIPsecParameters, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture", "Result", future.Response(), "Polling failure") - return - } 
- if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vcipp.Response.Response, err = future.GetResult(sender) - if vcipp.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture", "Result", nil, "received nil response and error") - } - if err == nil && vcipp.Response.Response.StatusCode != http.StatusNoContent { - vcipp, err = client.SetVpnclientIpsecParametersResponder(vcipp.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysSetVpnclientIpsecParametersFuture", "Result", vcipp.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1759,30 +1469,7 @@ func (client VirtualNetworkGatewaysClient) StartPacketCaptureSender(req *http.Re var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (s String, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysStartPacketCaptureFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysStartPacketCaptureFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - s.Response.Response, err = future.GetResult(sender) - if s.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysStartPacketCaptureFuture", "Result", nil, "received nil response and error") - } - if err == nil && s.Response.Response.StatusCode != http.StatusNoContent { - s, err = client.StartPacketCaptureResponder(s.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysStartPacketCaptureFuture", "Result", s.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1863,30 +1550,7 @@ func (client VirtualNetworkGatewaysClient) StopPacketCaptureSender(req *http.Req var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (s String, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysStopPacketCaptureFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysStopPacketCaptureFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - s.Response.Response, err = future.GetResult(sender) - if s.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysStopPacketCaptureFuture", "Result", nil, "received nil response and error") - } - if err == nil && s.Response.Response.StatusCode != 
http.StatusNoContent { - s, err = client.StopPacketCaptureResponder(s.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysStopPacketCaptureFuture", "Result", s.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -2043,30 +1707,7 @@ func (client VirtualNetworkGatewaysClient) UpdateTagsSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkGatewaysClient) (vng VirtualNetworkGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vng.Response.Response, err = future.GetResult(sender) - if vng.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && vng.Response.Response.StatusCode != http.StatusNoContent { - vng, err = client.UpdateTagsResponder(vng.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysUpdateTagsFuture", "Result", vng.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualnetworkpeerings.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualnetworkpeerings.go index 595bf9c4a71..ba0b00142bc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualnetworkpeerings.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualnetworkpeerings.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -110,30 +99,7 @@ func (client VirtualNetworkPeeringsClient) CreateOrUpdateSender(req *http.Reques var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkPeeringsClient) (vnp VirtualNetworkPeering, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkPeeringsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vnp.Response.Response, err = future.GetResult(sender) - if vnp.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vnp.Response.Response.StatusCode != http.StatusNoContent { - vnp, err = client.CreateOrUpdateResponder(vnp.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsCreateOrUpdateFuture", "Result", vnp.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -213,20 +179,7 @@ func (client VirtualNetworkPeeringsClient) DeleteSender(req *http.Request) (futu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkPeeringsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkPeeringsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualnetworks.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualnetworks.go index fe57138b4fa..68477541fb7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualnetworks.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualnetworks.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. 
// Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -184,30 +173,7 @@ func (client VirtualNetworksClient) CreateOrUpdateSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworksClient) (vn VirtualNetwork, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworksCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworksCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vn.Response.Response, err = future.GetResult(sender) - if vn.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworksCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vn.Response.Response.StatusCode != http.StatusNoContent { - vn, err = client.CreateOrUpdateResponder(vn.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworksCreateOrUpdateFuture", "Result", vn.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -285,20 +251,7 @@ func (client VirtualNetworksClient) DeleteSender(req *http.Request) (future Virt var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworksClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworksDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworksDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -805,30 +758,7 @@ func (client VirtualNetworksClient) UpdateTagsSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworksClient) (vn VirtualNetwork, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworksUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworksUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vn.Response.Response, err = future.GetResult(sender) - if vn.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworksUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && vn.Response.Response.StatusCode != http.StatusNoContent { - vn, err = client.UpdateTagsResponder(vn.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworksUpdateTagsFuture", "Result", vn.Response.Response, "Failure responding to request") - } - } - 
return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualnetworktaps.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualnetworktaps.go index c6628ca896d..198258c230e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualnetworktaps.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualnetworktaps.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -139,30 +128,7 @@ func (client VirtualNetworkTapsClient) CreateOrUpdateSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkTapsClient) (vnt VirtualNetworkTap, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkTapsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vnt.Response.Response, err = future.GetResult(sender) - if vnt.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vnt.Response.Response.StatusCode != http.StatusNoContent { - vnt, err = client.CreateOrUpdateResponder(vnt.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsCreateOrUpdateFuture", "Result", vnt.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -240,20 +206,7 @@ func (client VirtualNetworkTapsClient) DeleteSender(req *http.Request) (future V var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkTapsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = 
azure.NewAsyncOpIncompleteError("network.VirtualNetworkTapsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -638,30 +591,7 @@ func (client VirtualNetworkTapsClient) UpdateTagsSender(req *http.Request) (futu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkTapsClient) (vnt VirtualNetworkTap, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkTapsUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vnt.Response.Response, err = future.GetResult(sender) - if vnt.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && vnt.Response.Response.StatusCode != http.StatusNoContent { - vnt, err = client.UpdateTagsResponder(vnt.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkTapsUpdateTagsFuture", "Result", vnt.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualrouterpeerings.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualrouterpeerings.go index ccd950429a5..837d58143c1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualrouterpeerings.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualrouterpeerings.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
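Consumer code should be unaffected by this refactor: future.Result remains a func field on each future type, and callers still invoke it once the long-running operation completes. A minimal caller-side sketch, assuming the usual generated CreateOrUpdate signature on VirtualNetworksClient (that signature is not shown in this diff) and the standard go-autorest WaitForCompletionRef polling helper; the resource names and parameters are illustrative:

package vnetexample

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network"
)

// createVirtualNetwork starts the long-running operation, waits for it, and decodes the result.
func createVirtualNetwork(ctx context.Context, client network.VirtualNetworksClient, resourceGroup, vnetName string, params network.VirtualNetwork) (network.VirtualNetwork, error) {
	// The Sender behind CreateOrUpdate now wires future.Result = future.result.
	future, err := client.CreateOrUpdate(ctx, resourceGroup, vnetName, params)
	if err != nil {
		return network.VirtualNetwork{}, fmt.Errorf("starting CreateOrUpdate: %w", err)
	}
	// Poll until the operation reaches a terminal state (helper promoted from the embedded azure.Future).
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return network.VirtualNetwork{}, fmt.Errorf("waiting for CreateOrUpdate: %w", err)
	}
	// Result delegates to the generated result method introduced by this regeneration.
	return future.Result(client)
}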
@@ -123,30 +112,7 @@ func (client VirtualRouterPeeringsClient) CreateOrUpdateSender(req *http.Request var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualRouterPeeringsClient) (vrp VirtualRouterPeering, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualRouterPeeringsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualRouterPeeringsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vrp.Response.Response, err = future.GetResult(sender) - if vrp.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualRouterPeeringsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vrp.Response.Response.StatusCode != http.StatusNoContent { - vrp, err = client.CreateOrUpdateResponder(vrp.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualRouterPeeringsCreateOrUpdateFuture", "Result", vrp.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -226,20 +192,7 @@ func (client VirtualRouterPeeringsClient) DeleteSender(req *http.Request) (futur var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualRouterPeeringsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualRouterPeeringsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualRouterPeeringsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualrouters.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualrouters.go index a19d43ee35d..c7eb3ac0800 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualrouters.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualrouters.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. 
// Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -119,30 +108,7 @@ func (client VirtualRoutersClient) CreateOrUpdateSender(req *http.Request) (futu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualRoutersClient) (vr VirtualRouter, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualRoutersCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualRoutersCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vr.Response.Response, err = future.GetResult(sender) - if vr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualRoutersCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vr.Response.Response.StatusCode != http.StatusNoContent { - vr, err = client.CreateOrUpdateResponder(vr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualRoutersCreateOrUpdateFuture", "Result", vr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -220,20 +186,7 @@ func (client VirtualRoutersClient) DeleteSender(req *http.Request) (future Virtu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualRoutersClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualRoutersDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualRoutersDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualwans.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualwans.go index 6f24dd2b967..463b4a37fee 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualwans.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/virtualwans.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. 
// Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -107,30 +96,7 @@ func (client VirtualWansClient) CreateOrUpdateSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualWansClient) (vw VirtualWAN, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualWansCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualWansCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vw.Response.Response, err = future.GetResult(sender) - if vw.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualWansCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vw.Response.Response.StatusCode != http.StatusNoContent { - vw, err = client.CreateOrUpdateResponder(vw.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualWansCreateOrUpdateFuture", "Result", vw.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -208,20 +174,7 @@ func (client VirtualWansClient) DeleteSender(req *http.Request) (future VirtualW var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualWansClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualWansDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualWansDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -606,30 +559,7 @@ func (client VirtualWansClient) UpdateTagsSender(req *http.Request) (future Virt var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualWansClient) (vw VirtualWAN, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualWansUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VirtualWansUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vw.Response.Response, err = future.GetResult(sender) - if vw.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VirtualWansUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && vw.Response.Response.StatusCode != http.StatusNoContent { - vw, err = client.UpdateTagsResponder(vw.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualWansUpdateTagsFuture", "Result", vw.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpnconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpnconnections.go index eb37660165c..fffdc290278 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpnconnections.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpnconnections.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -110,30 +99,7 @@ func (client VpnConnectionsClient) CreateOrUpdateSender(req *http.Request) (futu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VpnConnectionsClient) (vc VpnConnection, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnConnectionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VpnConnectionsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vc.Response.Response, err = future.GetResult(sender) - if vc.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VpnConnectionsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vc.Response.Response.StatusCode != http.StatusNoContent { - vc, err = client.CreateOrUpdateResponder(vc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnConnectionsCreateOrUpdateFuture", "Result", vc.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -213,20 +179,7 @@ func (client VpnConnectionsClient) DeleteSender(req *http.Request) (future VpnCo var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VpnConnectionsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnConnectionsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VpnConnectionsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpngateways.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpngateways.go index 4143607e8db..aa8b5ee3fa2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpngateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpngateways.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -107,30 +96,7 @@ func (client VpnGatewaysClient) CreateOrUpdateSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VpnGatewaysClient) (vg VpnGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VpnGatewaysCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vg.Response.Response, err = future.GetResult(sender) - if vg.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VpnGatewaysCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vg.Response.Response.StatusCode != http.StatusNoContent { - vg, err = client.CreateOrUpdateResponder(vg.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnGatewaysCreateOrUpdateFuture", "Result", vg.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -208,20 +174,7 @@ func (client VpnGatewaysClient) DeleteSender(req *http.Request) (future VpnGatew var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VpnGatewaysClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VpnGatewaysDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -603,30 +556,7 @@ func (client 
VpnGatewaysClient) ResetSender(req *http.Request) (future VpnGatewa var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VpnGatewaysClient) (vg VpnGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnGatewaysResetFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VpnGatewaysResetFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vg.Response.Response, err = future.GetResult(sender) - if vg.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VpnGatewaysResetFuture", "Result", nil, "received nil response and error") - } - if err == nil && vg.Response.Response.StatusCode != http.StatusNoContent { - vg, err = client.ResetResponder(vg.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnGatewaysResetFuture", "Result", vg.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -707,30 +637,7 @@ func (client VpnGatewaysClient) UpdateTagsSender(req *http.Request) (future VpnG var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VpnGatewaysClient) (vg VpnGateway, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnGatewaysUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VpnGatewaysUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vg.Response.Response, err = future.GetResult(sender) - if vg.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VpnGatewaysUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && vg.Response.Response.StatusCode != http.StatusNoContent { - vg, err = client.UpdateTagsResponder(vg.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnGatewaysUpdateTagsFuture", "Result", vg.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpnlinkconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpnlinkconnections.go index f4df019f5ba..ee8266f8be7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpnlinkconnections.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpnlinkconnections.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpnsitelinkconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpnsitelinkconnections.go index 04df66479b6..54cffe846e4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpnsitelinkconnections.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpnsitelinkconnections.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpnsitelinks.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpnsitelinks.go index 748b6a2d50f..1ed1f0c04c4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpnsitelinks.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpnsitelinks.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. 
// Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpnsites.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpnsites.go index 1dbf144a68f..02ee61a4329 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpnsites.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpnsites.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -107,30 +96,7 @@ func (client VpnSitesClient) CreateOrUpdateSender(req *http.Request) (future Vpn var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VpnSitesClient) (vs VpnSite, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnSitesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VpnSitesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vs.Response.Response, err = future.GetResult(sender) - if vs.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VpnSitesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vs.Response.Response.StatusCode != http.StatusNoContent { - vs, err = client.CreateOrUpdateResponder(vs.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnSitesCreateOrUpdateFuture", "Result", vs.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -208,20 +174,7 @@ func (client VpnSitesClient) DeleteSender(req *http.Request) (future VpnSitesDel var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VpnSitesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnSitesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VpnSitesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result 
return } @@ -606,30 +559,7 @@ func (client VpnSitesClient) UpdateTagsSender(req *http.Request) (future VpnSite var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VpnSitesClient) (vs VpnSite, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnSitesUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VpnSitesUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vs.Response.Response, err = future.GetResult(sender) - if vs.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.VpnSitesUpdateTagsFuture", "Result", nil, "received nil response and error") - } - if err == nil && vs.Response.Response.StatusCode != http.StatusNoContent { - vs, err = client.UpdateTagsResponder(vs.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnSitesUpdateTagsFuture", "Result", vs.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpnsitesconfiguration.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpnsitesconfiguration.go index 5ccd0d167dd..8941bdf6698 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpnsitesconfiguration.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/vpnsitesconfiguration.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -114,20 +103,7 @@ func (client VpnSitesConfigurationClient) DownloadSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VpnSitesConfigurationClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VpnSitesConfigurationDownloadFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.VpnSitesConfigurationDownloadFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/watchers.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/watchers.go index f9ad7fbd162..5b157ea0f71 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/watchers.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/watchers.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
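The Delete- and Download-style operations in this and the surrounding files follow the same scheme, only in the simpler no-body variant: the removed closures returned a bare autorest.Response built from future.Response(). The unexported method they now point at presumably looks like the following sketch, again reconstructed from the deleted closure rather than copied from the vendored models.go:

package network

import (
	"context"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

// result for futures whose only payload is the raw HTTP response (Delete,
// Download, and similar): poll for completion, then hand back the response.
func (future *VpnSitesConfigurationDownloadFuture) result(client VpnSitesConfigurationClient) (ar autorest.Response, err error) {
	var done bool
	done, err = future.DoneWithContext(context.Background(), client)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.VpnSitesConfigurationDownloadFuture", "Result", future.Response(), "Polling failure")
		return
	}
	if !done {
		err = azure.NewAsyncOpIncompleteError("network.VpnSitesConfigurationDownloadFuture")
		return
	}
	// No typed body to decode; just capture the final response.
	ar.Response = future.Response()
	return
}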
@@ -116,30 +105,7 @@ func (client WatchersClient) CheckConnectivitySender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WatchersClient) (ci ConnectivityInformation, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersCheckConnectivityFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.WatchersCheckConnectivityFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ci.Response.Response, err = future.GetResult(sender) - if ci.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.WatchersCheckConnectivityFuture", "Result", nil, "received nil response and error") - } - if err == nil && ci.Response.Response.StatusCode != http.StatusNoContent { - ci, err = client.CheckConnectivityResponder(ci.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersCheckConnectivityFuture", "Result", ci.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -296,20 +262,7 @@ func (client WatchersClient) DeleteSender(req *http.Request) (future WatchersDel var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WatchersClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.WatchersDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -475,30 +428,7 @@ func (client WatchersClient) GetAzureReachabilityReportSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WatchersClient) (arr AzureReachabilityReport, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetAzureReachabilityReportFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.WatchersGetAzureReachabilityReportFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - arr.Response.Response, err = future.GetResult(sender) - if arr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetAzureReachabilityReportFuture", "Result", nil, "received nil response and error") - } - if err == nil && arr.Response.Response.StatusCode != http.StatusNoContent { - arr, err = client.GetAzureReachabilityReportResponder(arr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetAzureReachabilityReportFuture", "Result", arr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ 
-585,30 +515,7 @@ func (client WatchersClient) GetFlowLogStatusSender(req *http.Request) (future W var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WatchersClient) (fli FlowLogInformation, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetFlowLogStatusFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.WatchersGetFlowLogStatusFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - fli.Response.Response, err = future.GetResult(sender) - if fli.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetFlowLogStatusFuture", "Result", nil, "received nil response and error") - } - if err == nil && fli.Response.Response.StatusCode != http.StatusNoContent { - fli, err = client.GetFlowLogStatusResponder(fli.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetFlowLogStatusFuture", "Result", fli.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -700,30 +607,7 @@ func (client WatchersClient) GetNetworkConfigurationDiagnosticSender(req *http.R var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WatchersClient) (cdr ConfigurationDiagnosticResponse, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetNetworkConfigurationDiagnosticFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.WatchersGetNetworkConfigurationDiagnosticFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - cdr.Response.Response, err = future.GetResult(sender) - if cdr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetNetworkConfigurationDiagnosticFuture", "Result", nil, "received nil response and error") - } - if err == nil && cdr.Response.Response.StatusCode != http.StatusNoContent { - cdr, err = client.GetNetworkConfigurationDiagnosticResponder(cdr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetNetworkConfigurationDiagnosticFuture", "Result", cdr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -812,30 +696,7 @@ func (client WatchersClient) GetNextHopSender(req *http.Request) (future Watcher var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WatchersClient) (nhr NextHopResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetNextHopFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.WatchersGetNextHopFuture") - return - } - sender := autorest.DecorateSender(client, 
autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - nhr.Response.Response, err = future.GetResult(sender) - if nhr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetNextHopFuture", "Result", nil, "received nil response and error") - } - if err == nil && nhr.Response.Response.StatusCode != http.StatusNoContent { - nhr, err = client.GetNextHopResponder(nhr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetNextHopFuture", "Result", nhr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1005,30 +866,7 @@ func (client WatchersClient) GetTroubleshootingSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WatchersClient) (tr TroubleshootingResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetTroubleshootingFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.WatchersGetTroubleshootingFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - tr.Response.Response, err = future.GetResult(sender) - if tr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetTroubleshootingFuture", "Result", nil, "received nil response and error") - } - if err == nil && tr.Response.Response.StatusCode != http.StatusNoContent { - tr, err = client.GetTroubleshootingResponder(tr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetTroubleshootingFuture", "Result", tr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1115,30 +953,7 @@ func (client WatchersClient) GetTroubleshootingResultSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WatchersClient) (tr TroubleshootingResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetTroubleshootingResultFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.WatchersGetTroubleshootingResultFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - tr.Response.Response, err = future.GetResult(sender) - if tr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetTroubleshootingResultFuture", "Result", nil, "received nil response and error") - } - if err == nil && tr.Response.Response.StatusCode != http.StatusNoContent { - tr, err = client.GetTroubleshootingResultResponder(tr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetTroubleshootingResultFuture", "Result", tr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ 
-1225,30 +1040,7 @@ func (client WatchersClient) GetVMSecurityRulesSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WatchersClient) (sgvr SecurityGroupViewResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetVMSecurityRulesFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.WatchersGetVMSecurityRulesFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - sgvr.Response.Response, err = future.GetResult(sender) - if sgvr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetVMSecurityRulesFuture", "Result", nil, "received nil response and error") - } - if err == nil && sgvr.Response.Response.StatusCode != http.StatusNoContent { - sgvr, err = client.GetVMSecurityRulesResponder(sgvr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersGetVMSecurityRulesFuture", "Result", sgvr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1474,30 +1266,7 @@ func (client WatchersClient) ListAvailableProvidersSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WatchersClient) (apl AvailableProvidersList, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersListAvailableProvidersFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.WatchersListAvailableProvidersFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - apl.Response.Response, err = future.GetResult(sender) - if apl.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.WatchersListAvailableProvidersFuture", "Result", nil, "received nil response and error") - } - if err == nil && apl.Response.Response.StatusCode != http.StatusNoContent { - apl, err = client.ListAvailableProvidersResponder(apl.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersListAvailableProvidersFuture", "Result", apl.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1592,30 +1361,7 @@ func (client WatchersClient) SetFlowLogConfigurationSender(req *http.Request) (f var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WatchersClient) (fli FlowLogInformation, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersSetFlowLogConfigurationFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.WatchersSetFlowLogConfigurationFuture") - return - } - sender := autorest.DecorateSender(client, 
autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - fli.Response.Response, err = future.GetResult(sender) - if fli.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.WatchersSetFlowLogConfigurationFuture", "Result", nil, "received nil response and error") - } - if err == nil && fli.Response.Response.StatusCode != http.StatusNoContent { - fli, err = client.SetFlowLogConfigurationResponder(fli.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersSetFlowLogConfigurationFuture", "Result", fli.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1785,30 +1531,7 @@ func (client WatchersClient) VerifyIPFlowSender(req *http.Request) (future Watch var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WatchersClient) (vifr VerificationIPFlowResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersVerifyIPFlowFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.WatchersVerifyIPFlowFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vifr.Response.Response, err = future.GetResult(sender) - if vifr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "network.WatchersVerifyIPFlowFuture", "Result", nil, "received nil response and error") - } - if err == nil && vifr.Response.Response.StatusCode != http.StatusNoContent { - vifr, err = client.VerifyIPFlowResponder(vifr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WatchersVerifyIPFlowFuture", "Result", vifr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/webapplicationfirewallpolicies.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/webapplicationfirewallpolicies.go index 521bdd23d27..825cb49f4e0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/webapplicationfirewallpolicies.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-07-01/network/webapplicationfirewallpolicies.go @@ -1,18 +1,7 @@ package network -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
// // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -196,20 +185,7 @@ func (client WebApplicationFirewallPoliciesClient) DeleteSender(req *http.Reques var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WebApplicationFirewallPoliciesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "network.WebApplicationFirewallPoliciesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("network.WebApplicationFirewallPoliciesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/CHANGELOG.md index dbf52bd3fd2..afe900313b3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/CHANGELOG.md @@ -1,5 +1,5 @@ Generated from https://github.com/Azure/azure-rest-api-specs/tree/3c764635e7d442b3e74caf593029fcd440b3ef82//specification/authorization/resource-manager/readme.md tag: `package-2018-09-01-preview` -Code generator @microsoft.azure/autorest.go@2.1.175 +Code generator @microsoft.azure/autorest.go@2.1.178 diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/_meta.json new file mode 100644 index 00000000000..29a34497e78 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/_meta.json @@ -0,0 +1,10 @@ +{ + "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", + "readme": "/_/azure-rest-api-specs/specification/authorization/resource-manager/readme.md", + "tag": "package-2018-09-01-preview", + "use": "@microsoft.azure/autorest.go@2.1.178", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.178 --go --verbose --go-sdk-folder=/_/azure-sdk-for-go --tag=package-2018-09-01-preview --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/authorization/resource-manager/readme.md", + "additional_properties": { + "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION" + } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/classicadministrators.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/classicadministrators.go index 6762e12124b..71dc9f3cdf7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/classicadministrators.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/classicadministrators.go @@ -1,18 +1,7 @@ package authorization -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/client.go index 1958a67601a..8accd9738c1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/client.go @@ -3,19 +3,8 @@ // package authorization -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/denyassignments.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/denyassignments.go index 4e842662dac..f5d7d038799 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/denyassignments.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/denyassignments.go @@ -1,18 +1,7 @@ package authorization -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/enums.go index ec477d6ae4e..f0c637120a4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/enums.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/enums.go @@ -1,18 +1,7 @@ package authorization -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/globaladministrator.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/globaladministrator.go index 8eb18e9189a..562717c1620 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/globaladministrator.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/globaladministrator.go @@ -1,18 +1,7 @@ package authorization -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. 
+// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/models.go index 79176bcc19b..1581f8a7163 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/models.go @@ -1,18 +1,7 @@ package authorization -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/permissions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/permissions.go index ae33769803f..62d87b8d120 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/permissions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/permissions.go @@ -1,18 +1,7 @@ package authorization -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/provideroperationsmetadata.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/provideroperationsmetadata.go index 888fb783fd1..359c843c082 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/provideroperationsmetadata.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/provideroperationsmetadata.go @@ -1,18 +1,7 @@ package authorization -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/roleassignments.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/roleassignments.go index fdedb771a87..ea9b915211a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/roleassignments.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/roleassignments.go @@ -1,18 +1,7 @@ package authorization -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/roledefinitions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/roledefinitions.go index 74a6f6e18f9..d3ee5a13aca 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/roledefinitions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/roledefinitions.go @@ -1,18 +1,7 @@ package authorization -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/version.go index 6dbc7350646..16a6559fae9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization/version.go @@ -2,19 +2,8 @@ package authorization import "github.com/Azure/azure-sdk-for-go/version" -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/CHANGELOG.md index f4015b339f6..6dacc3037c1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/CHANGELOG.md @@ -1,5 +1,48 @@ Generated from https://github.com/Azure/azure-rest-api-specs/tree/3c764635e7d442b3e74caf593029fcd440b3ef82//specification/containerregistry/resource-manager/readme.md tag: `package-2020-11-preview` -Code generator @microsoft.azure/autorest.go@2.1.175 +Code generator @microsoft.azure/autorest.go@2.1.178 +### New Funcs + +1. *AgentPoolsCreateFuture.UnmarshalJSON([]byte) error +1. *AgentPoolsDeleteFuture.UnmarshalJSON([]byte) error +1. *AgentPoolsUpdateFuture.UnmarshalJSON([]byte) error +1. *ConnectedRegistriesCreateFuture.UnmarshalJSON([]byte) error +1. *ConnectedRegistriesDeactivateFuture.UnmarshalJSON([]byte) error +1. *ConnectedRegistriesDeleteFuture.UnmarshalJSON([]byte) error +1. *ConnectedRegistriesUpdateFuture.UnmarshalJSON([]byte) error +1. *ExportPipelinesCreateFuture.UnmarshalJSON([]byte) error +1. *ExportPipelinesDeleteFuture.UnmarshalJSON([]byte) error +1. *ImportPipelinesCreateFuture.UnmarshalJSON([]byte) error +1. *ImportPipelinesDeleteFuture.UnmarshalJSON([]byte) error +1. *PipelineRunsCreateFuture.UnmarshalJSON([]byte) error +1. *PipelineRunsDeleteFuture.UnmarshalJSON([]byte) error +1. *PrivateEndpointConnectionsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *PrivateEndpointConnectionsDeleteFuture.UnmarshalJSON([]byte) error +1. *RegistriesCreateFuture.UnmarshalJSON([]byte) error +1. *RegistriesDeleteFuture.UnmarshalJSON([]byte) error +1. *RegistriesGenerateCredentialsFuture.UnmarshalJSON([]byte) error +1. *RegistriesImportImageFuture.UnmarshalJSON([]byte) error +1. *RegistriesScheduleRunFuture.UnmarshalJSON([]byte) error +1. *RegistriesUpdateFuture.UnmarshalJSON([]byte) error +1. *ReplicationsCreateFuture.UnmarshalJSON([]byte) error +1. *ReplicationsDeleteFuture.UnmarshalJSON([]byte) error +1. *ReplicationsUpdateFuture.UnmarshalJSON([]byte) error +1. *RunsCancelFuture.UnmarshalJSON([]byte) error +1. *RunsUpdateFuture.UnmarshalJSON([]byte) error +1. *ScopeMapsCreateFuture.UnmarshalJSON([]byte) error +1. *ScopeMapsDeleteFuture.UnmarshalJSON([]byte) error +1. *ScopeMapsUpdateFuture.UnmarshalJSON([]byte) error +1. *TaskRunsCreateFuture.UnmarshalJSON([]byte) error +1. *TaskRunsDeleteFuture.UnmarshalJSON([]byte) error +1. *TaskRunsUpdateFuture.UnmarshalJSON([]byte) error +1. *TasksCreateFuture.UnmarshalJSON([]byte) error +1. *TasksDeleteFuture.UnmarshalJSON([]byte) error +1. *TasksUpdateFuture.UnmarshalJSON([]byte) error +1. *TokensCreateFuture.UnmarshalJSON([]byte) error +1. *TokensDeleteFuture.UnmarshalJSON([]byte) error +1. *TokensUpdateFuture.UnmarshalJSON([]byte) error +1. *WebhooksCreateFuture.UnmarshalJSON([]byte) error +1. *WebhooksDeleteFuture.UnmarshalJSON([]byte) error +1. 
*WebhooksUpdateFuture.UnmarshalJSON([]byte) error diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/_meta.json new file mode 100644 index 00000000000..897db444785 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/_meta.json @@ -0,0 +1,10 @@ +{ + "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", + "readme": "/_/azure-rest-api-specs/specification/containerregistry/resource-manager/readme.md", + "tag": "package-2020-11-preview", + "use": "@microsoft.azure/autorest.go@2.1.178", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.178 --go --verbose --go-sdk-folder=/_/azure-sdk-for-go --tag=package-2020-11-preview --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/containerregistry/resource-manager/readme.md", + "additional_properties": { + "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION" + } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/agentpools.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/agentpools.go index 1399df8b17f..6c8eb868fb0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/agentpools.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/agentpools.go @@ -1,18 +1,7 @@ package containerregistry -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -123,30 +112,7 @@ func (client AgentPoolsClient) CreateSender(req *http.Request) (future AgentPool var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client AgentPoolsClient) (ap AgentPool, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.AgentPoolsCreateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.AgentPoolsCreateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ap.Response.Response, err = future.GetResult(sender) - if ap.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "containerregistry.AgentPoolsCreateFuture", "Result", nil, "received nil response and error") - } - if err == nil && ap.Response.Response.StatusCode != http.StatusNoContent { - ap, err = client.CreateResponder(ap.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.AgentPoolsCreateFuture", "Result", ap.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -240,20 +206,7 @@ func (client AgentPoolsClient) DeleteSender(req *http.Request) (future AgentPool var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client AgentPoolsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.AgentPoolsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.AgentPoolsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -661,30 +614,7 @@ func (client AgentPoolsClient) UpdateSender(req *http.Request) (future AgentPool var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client AgentPoolsClient) (ap AgentPool, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.AgentPoolsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.AgentPoolsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ap.Response.Response, err = future.GetResult(sender) - if ap.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "containerregistry.AgentPoolsUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && ap.Response.Response.StatusCode != http.StatusNoContent { - ap, err = client.UpdateResponder(ap.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.AgentPoolsUpdateFuture", "Result", ap.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/client.go index 578a4382314..79937e12376 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/client.go @@ -3,19 +3,8 @@ // package containerregistry -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/connectedregistries.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/connectedregistries.go index f447ad09bbc..94bfc5c5ae8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/connectedregistries.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/connectedregistries.go @@ -1,18 +1,7 @@ package containerregistry -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -133,30 +122,7 @@ func (client ConnectedRegistriesClient) CreateSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ConnectedRegistriesClient) (cr ConnectedRegistry, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ConnectedRegistriesCreateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.ConnectedRegistriesCreateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - cr.Response.Response, err = future.GetResult(sender) - if cr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "containerregistry.ConnectedRegistriesCreateFuture", "Result", nil, "received nil response and error") - } - if err == nil && cr.Response.Response.StatusCode != http.StatusNoContent { - cr, err = client.CreateResponder(cr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ConnectedRegistriesCreateFuture", "Result", cr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -250,20 +216,7 @@ func (client ConnectedRegistriesClient) DeactivateSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ConnectedRegistriesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ConnectedRegistriesDeactivateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.ConnectedRegistriesDeactivateFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -356,20 +309,7 @@ func (client ConnectedRegistriesClient) DeleteSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ConnectedRegistriesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ConnectedRegistriesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.ConnectedRegistriesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -691,30 +631,7 @@ func (client ConnectedRegistriesClient) UpdateSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ConnectedRegistriesClient) (cr ConnectedRegistry, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ConnectedRegistriesUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = 
azure.NewAsyncOpIncompleteError("containerregistry.ConnectedRegistriesUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - cr.Response.Response, err = future.GetResult(sender) - if cr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "containerregistry.ConnectedRegistriesUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && cr.Response.Response.StatusCode != http.StatusNoContent { - cr, err = client.UpdateResponder(cr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ConnectedRegistriesUpdateFuture", "Result", cr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/enums.go index 2adfb391ba2..809d217b24e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/enums.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/enums.go @@ -1,18 +1,7 @@ package containerregistry -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/exportpipelines.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/exportpipelines.go index 25e1aa96002..b499a249ad8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/exportpipelines.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/exportpipelines.go @@ -1,18 +1,7 @@ package containerregistry -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -128,30 +117,7 @@ func (client ExportPipelinesClient) CreateSender(req *http.Request) (future Expo var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExportPipelinesClient) (ep ExportPipeline, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ExportPipelinesCreateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.ExportPipelinesCreateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - ep.Response.Response, err = future.GetResult(sender) - if ep.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "containerregistry.ExportPipelinesCreateFuture", "Result", nil, "received nil response and error") - } - if err == nil && ep.Response.Response.StatusCode != http.StatusNoContent { - ep, err = client.CreateResponder(ep.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ExportPipelinesCreateFuture", "Result", ep.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -245,20 +211,7 @@ func (client ExportPipelinesClient) DeleteSender(req *http.Request) (future Expo var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ExportPipelinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ExportPipelinesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.ExportPipelinesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/importpipelines.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/importpipelines.go index ad23803047a..87742043d92 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/importpipelines.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/importpipelines.go @@ -1,18 +1,7 @@ package containerregistry -// Copyright (c) 
Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -128,30 +117,7 @@ func (client ImportPipelinesClient) CreateSender(req *http.Request) (future Impo var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ImportPipelinesClient) (IP ImportPipeline, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ImportPipelinesCreateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.ImportPipelinesCreateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - IP.Response.Response, err = future.GetResult(sender) - if IP.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "containerregistry.ImportPipelinesCreateFuture", "Result", nil, "received nil response and error") - } - if err == nil && IP.Response.Response.StatusCode != http.StatusNoContent { - IP, err = client.CreateResponder(IP.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ImportPipelinesCreateFuture", "Result", IP.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -245,20 +211,7 @@ func (client ImportPipelinesClient) DeleteSender(req *http.Request) (future Impo var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ImportPipelinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ImportPipelinesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.ImportPipelinesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/models.go index 15eb4399967..bee9c622b93 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/models.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/models.go @@ -1,18 +1,7 @@ package containerregistry -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -379,6 +368,39 @@ type AgentPoolsCreateFuture struct { Result func(AgentPoolsClient) (AgentPool, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *AgentPoolsCreateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for AgentPoolsCreateFuture.Result. +func (future *AgentPoolsCreateFuture) result(client AgentPoolsClient) (ap AgentPool, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.AgentPoolsCreateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.AgentPoolsCreateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ap.Response.Response, err = future.GetResult(sender); err == nil && ap.Response.Response.StatusCode != http.StatusNoContent { + ap, err = client.CreateResponder(ap.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.AgentPoolsCreateFuture", "Result", ap.Response.Response, "Failure responding to request") + } + } + return +} + // AgentPoolsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type AgentPoolsDeleteFuture struct { @@ -388,6 +410,33 @@ type AgentPoolsDeleteFuture struct { Result func(AgentPoolsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *AgentPoolsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for AgentPoolsDeleteFuture.Result. 
+func (future *AgentPoolsDeleteFuture) result(client AgentPoolsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.AgentPoolsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.AgentPoolsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // AgentPoolsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type AgentPoolsUpdateFuture struct { @@ -397,6 +446,39 @@ type AgentPoolsUpdateFuture struct { Result func(AgentPoolsClient) (AgentPool, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *AgentPoolsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for AgentPoolsUpdateFuture.Result. +func (future *AgentPoolsUpdateFuture) result(client AgentPoolsClient) (ap AgentPool, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.AgentPoolsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.AgentPoolsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ap.Response.Response, err = future.GetResult(sender); err == nil && ap.Response.Response.StatusCode != http.StatusNoContent { + ap, err = client.UpdateResponder(ap.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.AgentPoolsUpdateFuture", "Result", ap.Response.Response, "Failure responding to request") + } + } + return +} + // AgentPoolUpdateParameters the parameters for updating an agent pool. type AgentPoolUpdateParameters struct { // AgentPoolPropertiesUpdateParameters - The properties associated with the agent pool @@ -566,6 +648,39 @@ type ConnectedRegistriesCreateFuture struct { Result func(ConnectedRegistriesClient) (ConnectedRegistry, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ConnectedRegistriesCreateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ConnectedRegistriesCreateFuture.Result. 
+func (future *ConnectedRegistriesCreateFuture) result(client ConnectedRegistriesClient) (cr ConnectedRegistry, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ConnectedRegistriesCreateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.ConnectedRegistriesCreateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if cr.Response.Response, err = future.GetResult(sender); err == nil && cr.Response.Response.StatusCode != http.StatusNoContent { + cr, err = client.CreateResponder(cr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ConnectedRegistriesCreateFuture", "Result", cr.Response.Response, "Failure responding to request") + } + } + return +} + // ConnectedRegistriesDeactivateFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ConnectedRegistriesDeactivateFuture struct { @@ -575,6 +690,33 @@ type ConnectedRegistriesDeactivateFuture struct { Result func(ConnectedRegistriesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ConnectedRegistriesDeactivateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ConnectedRegistriesDeactivateFuture.Result. +func (future *ConnectedRegistriesDeactivateFuture) result(client ConnectedRegistriesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ConnectedRegistriesDeactivateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.ConnectedRegistriesDeactivateFuture") + return + } + ar.Response = future.Response() + return +} + // ConnectedRegistriesDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ConnectedRegistriesDeleteFuture struct { @@ -584,6 +726,33 @@ type ConnectedRegistriesDeleteFuture struct { Result func(ConnectedRegistriesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ConnectedRegistriesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ConnectedRegistriesDeleteFuture.Result. 
+func (future *ConnectedRegistriesDeleteFuture) result(client ConnectedRegistriesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ConnectedRegistriesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.ConnectedRegistriesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ConnectedRegistriesUpdateFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ConnectedRegistriesUpdateFuture struct { @@ -593,6 +762,39 @@ type ConnectedRegistriesUpdateFuture struct { Result func(ConnectedRegistriesClient) (ConnectedRegistry, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ConnectedRegistriesUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ConnectedRegistriesUpdateFuture.Result. +func (future *ConnectedRegistriesUpdateFuture) result(client ConnectedRegistriesClient) (cr ConnectedRegistry, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ConnectedRegistriesUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.ConnectedRegistriesUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if cr.Response.Response, err = future.GetResult(sender); err == nil && cr.Response.Response.StatusCode != http.StatusNoContent { + cr, err = client.UpdateResponder(cr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ConnectedRegistriesUpdateFuture", "Result", cr.Response.Response, "Failure responding to request") + } + } + return +} + // ConnectedRegistry an object that represents a connected registry for a container registry. type ConnectedRegistry struct { autorest.Response `json:"-"` @@ -2080,6 +2282,39 @@ type ExportPipelinesCreateFuture struct { Result func(ExportPipelinesClient) (ExportPipeline, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExportPipelinesCreateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExportPipelinesCreateFuture.Result. 
+func (future *ExportPipelinesCreateFuture) result(client ExportPipelinesClient) (ep ExportPipeline, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ExportPipelinesCreateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.ExportPipelinesCreateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ep.Response.Response, err = future.GetResult(sender); err == nil && ep.Response.Response.StatusCode != http.StatusNoContent { + ep, err = client.CreateResponder(ep.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ExportPipelinesCreateFuture", "Result", ep.Response.Response, "Failure responding to request") + } + } + return +} + // ExportPipelinesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type ExportPipelinesDeleteFuture struct { @@ -2089,6 +2324,33 @@ type ExportPipelinesDeleteFuture struct { Result func(ExportPipelinesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExportPipelinesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExportPipelinesDeleteFuture.Result. +func (future *ExportPipelinesDeleteFuture) result(client ExportPipelinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ExportPipelinesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.ExportPipelinesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ExportPipelineTargetProperties the properties of the export pipeline target. type ExportPipelineTargetProperties struct { // Type - The type of target for the export pipeline. @@ -2731,6 +2993,39 @@ type ImportPipelinesCreateFuture struct { Result func(ImportPipelinesClient) (ImportPipeline, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ImportPipelinesCreateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ImportPipelinesCreateFuture.Result. 
+func (future *ImportPipelinesCreateFuture) result(client ImportPipelinesClient) (IP ImportPipeline, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ImportPipelinesCreateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.ImportPipelinesCreateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if IP.Response.Response, err = future.GetResult(sender); err == nil && IP.Response.Response.StatusCode != http.StatusNoContent { + IP, err = client.CreateResponder(IP.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ImportPipelinesCreateFuture", "Result", IP.Response.Response, "Failure responding to request") + } + } + return +} + // ImportPipelinesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type ImportPipelinesDeleteFuture struct { @@ -2740,6 +3035,33 @@ type ImportPipelinesDeleteFuture struct { Result func(ImportPipelinesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ImportPipelinesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ImportPipelinesDeleteFuture.Result. +func (future *ImportPipelinesDeleteFuture) result(client ImportPipelinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ImportPipelinesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.ImportPipelinesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ImportPipelineSourceProperties the properties of the import pipeline source. type ImportPipelineSourceProperties struct { // Type - The type of source for the import pipeline. Possible values include: 'AzureStorageBlobContainer' @@ -3467,6 +3789,39 @@ type PipelineRunsCreateFuture struct { Result func(PipelineRunsClient) (PipelineRun, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PipelineRunsCreateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PipelineRunsCreateFuture.Result. 
+func (future *PipelineRunsCreateFuture) result(client PipelineRunsClient) (pr PipelineRun, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.PipelineRunsCreateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.PipelineRunsCreateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if pr.Response.Response, err = future.GetResult(sender); err == nil && pr.Response.Response.StatusCode != http.StatusNoContent { + pr, err = client.CreateResponder(pr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.PipelineRunsCreateFuture", "Result", pr.Response.Response, "Failure responding to request") + } + } + return +} + // PipelineRunsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type PipelineRunsDeleteFuture struct { @@ -3476,6 +3831,33 @@ type PipelineRunsDeleteFuture struct { Result func(PipelineRunsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PipelineRunsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PipelineRunsDeleteFuture.Result. +func (future *PipelineRunsDeleteFuture) result(client PipelineRunsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.PipelineRunsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.PipelineRunsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // PipelineRunSourceProperties ... type PipelineRunSourceProperties struct { // Type - The type of the source. Possible values include: 'AzureStorageBlob' @@ -3829,6 +4211,39 @@ type PrivateEndpointConnectionsCreateOrUpdateFuture struct { Result func(PrivateEndpointConnectionsClient) (PrivateEndpointConnection, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PrivateEndpointConnectionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PrivateEndpointConnectionsCreateOrUpdateFuture.Result. 
+func (future *PrivateEndpointConnectionsCreateOrUpdateFuture) result(client PrivateEndpointConnectionsClient) (pec PrivateEndpointConnection, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.PrivateEndpointConnectionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.PrivateEndpointConnectionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if pec.Response.Response, err = future.GetResult(sender); err == nil && pec.Response.Response.StatusCode != http.StatusNoContent { + pec, err = client.CreateOrUpdateResponder(pec.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.PrivateEndpointConnectionsCreateOrUpdateFuture", "Result", pec.Response.Response, "Failure responding to request") + } + } + return +} + // PrivateEndpointConnectionsDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type PrivateEndpointConnectionsDeleteFuture struct { @@ -3838,6 +4253,33 @@ type PrivateEndpointConnectionsDeleteFuture struct { Result func(PrivateEndpointConnectionsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PrivateEndpointConnectionsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PrivateEndpointConnectionsDeleteFuture.Result. +func (future *PrivateEndpointConnectionsDeleteFuture) result(client PrivateEndpointConnectionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.PrivateEndpointConnectionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.PrivateEndpointConnectionsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // PrivateLinkResource a resource that supports private link capabilities. type PrivateLinkResource struct { // Type - READ-ONLY; The resource type is private link resource. @@ -4137,6 +4579,39 @@ type RegistriesCreateFuture struct { Result func(RegistriesClient) (Registry, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RegistriesCreateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RegistriesCreateFuture.Result. 
+func (future *RegistriesCreateFuture) result(client RegistriesClient) (r Registry, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesCreateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.RegistriesCreateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if r.Response.Response, err = future.GetResult(sender); err == nil && r.Response.Response.StatusCode != http.StatusNoContent { + r, err = client.CreateResponder(r.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesCreateFuture", "Result", r.Response.Response, "Failure responding to request") + } + } + return +} + // RegistriesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type RegistriesDeleteFuture struct { @@ -4146,6 +4621,33 @@ type RegistriesDeleteFuture struct { Result func(RegistriesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RegistriesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RegistriesDeleteFuture.Result. +func (future *RegistriesDeleteFuture) result(client RegistriesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.RegistriesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // RegistriesGenerateCredentialsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type RegistriesGenerateCredentialsFuture struct { @@ -4155,6 +4657,39 @@ type RegistriesGenerateCredentialsFuture struct { Result func(RegistriesClient) (GenerateCredentialsResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RegistriesGenerateCredentialsFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RegistriesGenerateCredentialsFuture.Result. 
+func (future *RegistriesGenerateCredentialsFuture) result(client RegistriesClient) (gcr GenerateCredentialsResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesGenerateCredentialsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.RegistriesGenerateCredentialsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gcr.Response.Response, err = future.GetResult(sender); err == nil && gcr.Response.Response.StatusCode != http.StatusNoContent { + gcr, err = client.GenerateCredentialsResponder(gcr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesGenerateCredentialsFuture", "Result", gcr.Response.Response, "Failure responding to request") + } + } + return +} + // RegistriesImportImageFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type RegistriesImportImageFuture struct { @@ -4164,6 +4699,33 @@ type RegistriesImportImageFuture struct { Result func(RegistriesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RegistriesImportImageFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RegistriesImportImageFuture.Result. +func (future *RegistriesImportImageFuture) result(client RegistriesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesImportImageFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.RegistriesImportImageFuture") + return + } + ar.Response = future.Response() + return +} + // RegistriesScheduleRunFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type RegistriesScheduleRunFuture struct { @@ -4173,6 +4735,39 @@ type RegistriesScheduleRunFuture struct { Result func(RegistriesClient) (Run, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RegistriesScheduleRunFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RegistriesScheduleRunFuture.Result. 
+func (future *RegistriesScheduleRunFuture) result(client RegistriesClient) (r Run, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesScheduleRunFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.RegistriesScheduleRunFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if r.Response.Response, err = future.GetResult(sender); err == nil && r.Response.Response.StatusCode != http.StatusNoContent { + r, err = client.ScheduleRunResponder(r.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesScheduleRunFuture", "Result", r.Response.Response, "Failure responding to request") + } + } + return +} + // RegistriesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type RegistriesUpdateFuture struct { @@ -4182,6 +4777,39 @@ type RegistriesUpdateFuture struct { Result func(RegistriesClient) (Registry, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RegistriesUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RegistriesUpdateFuture.Result. +func (future *RegistriesUpdateFuture) result(client RegistriesClient) (r Registry, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.RegistriesUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if r.Response.Response, err = future.GetResult(sender); err == nil && r.Response.Response.StatusCode != http.StatusNoContent { + r, err = client.UpdateResponder(r.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesUpdateFuture", "Result", r.Response.Response, "Failure responding to request") + } + } + return +} + // Registry an object that represents a container registry. type Registry struct { autorest.Response `json:"-"` @@ -5006,6 +5634,39 @@ type ReplicationsCreateFuture struct { Result func(ReplicationsClient) (Replication, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ReplicationsCreateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ReplicationsCreateFuture.Result. 
+func (future *ReplicationsCreateFuture) result(client ReplicationsClient) (r Replication, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsCreateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.ReplicationsCreateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if r.Response.Response, err = future.GetResult(sender); err == nil && r.Response.Response.StatusCode != http.StatusNoContent { + r, err = client.CreateResponder(r.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsCreateFuture", "Result", r.Response.Response, "Failure responding to request") + } + } + return +} + // ReplicationsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type ReplicationsDeleteFuture struct { @@ -5015,6 +5676,33 @@ type ReplicationsDeleteFuture struct { Result func(ReplicationsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ReplicationsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ReplicationsDeleteFuture.Result. +func (future *ReplicationsDeleteFuture) result(client ReplicationsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.ReplicationsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ReplicationsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type ReplicationsUpdateFuture struct { @@ -5024,6 +5712,39 @@ type ReplicationsUpdateFuture struct { Result func(ReplicationsClient) (Replication, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ReplicationsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ReplicationsUpdateFuture.Result. 
+func (future *ReplicationsUpdateFuture) result(client ReplicationsClient) (r Replication, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.ReplicationsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if r.Response.Response, err = future.GetResult(sender); err == nil && r.Response.Response.StatusCode != http.StatusNoContent { + r, err = client.UpdateResponder(r.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsUpdateFuture", "Result", r.Response.Response, "Failure responding to request") + } + } + return +} + // ReplicationUpdateParameters the parameters for updating a replication. type ReplicationUpdateParameters struct { // Tags - The tags for the replication. @@ -5663,6 +6384,33 @@ type RunsCancelFuture struct { Result func(RunsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RunsCancelFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RunsCancelFuture.Result. +func (future *RunsCancelFuture) result(client RunsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RunsCancelFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.RunsCancelFuture") + return + } + ar.Response = future.Response() + return +} + // RunsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. type RunsUpdateFuture struct { azure.FutureAPI @@ -5671,6 +6419,39 @@ type RunsUpdateFuture struct { Result func(RunsClient) (Run, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RunsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RunsUpdateFuture.Result. 
+func (future *RunsUpdateFuture) result(client RunsClient) (r Run, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RunsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.RunsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if r.Response.Response, err = future.GetResult(sender); err == nil && r.Response.Response.StatusCode != http.StatusNoContent { + r, err = client.UpdateResponder(r.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RunsUpdateFuture", "Result", r.Response.Response, "Failure responding to request") + } + } + return +} + // RunUpdateParameters the set of run properties that can be updated. type RunUpdateParameters struct { // IsArchiveEnabled - The value that indicates whether archiving is enabled or not. @@ -5967,6 +6748,39 @@ type ScopeMapsCreateFuture struct { Result func(ScopeMapsClient) (ScopeMap, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ScopeMapsCreateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ScopeMapsCreateFuture.Result. +func (future *ScopeMapsCreateFuture) result(client ScopeMapsClient) (sm ScopeMap, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ScopeMapsCreateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.ScopeMapsCreateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if sm.Response.Response, err = future.GetResult(sender); err == nil && sm.Response.Response.StatusCode != http.StatusNoContent { + sm, err = client.CreateResponder(sm.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ScopeMapsCreateFuture", "Result", sm.Response.Response, "Failure responding to request") + } + } + return +} + // ScopeMapsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type ScopeMapsDeleteFuture struct { @@ -5976,6 +6790,33 @@ type ScopeMapsDeleteFuture struct { Result func(ScopeMapsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ScopeMapsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ScopeMapsDeleteFuture.Result. 
+func (future *ScopeMapsDeleteFuture) result(client ScopeMapsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ScopeMapsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.ScopeMapsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ScopeMapsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type ScopeMapsUpdateFuture struct { @@ -5985,6 +6826,39 @@ type ScopeMapsUpdateFuture struct { Result func(ScopeMapsClient) (ScopeMap, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ScopeMapsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ScopeMapsUpdateFuture.Result. +func (future *ScopeMapsUpdateFuture) result(client ScopeMapsClient) (sm ScopeMap, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ScopeMapsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.ScopeMapsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if sm.Response.Response, err = future.GetResult(sender); err == nil && sm.Response.Response.StatusCode != http.StatusNoContent { + sm, err = client.UpdateResponder(sm.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ScopeMapsUpdateFuture", "Result", sm.Response.Response, "Failure responding to request") + } + } + return +} + // ScopeMapUpdateParameters the properties for updating the scope map. type ScopeMapUpdateParameters struct { // ScopeMapPropertiesUpdateParameters - The update parameters for scope map properties. @@ -7321,6 +8195,39 @@ type TaskRunsCreateFuture struct { Result func(TaskRunsClient) (TaskRun, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *TaskRunsCreateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for TaskRunsCreateFuture.Result. 
+func (future *TaskRunsCreateFuture) result(client TaskRunsClient) (tr TaskRun, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TaskRunsCreateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.TaskRunsCreateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if tr.Response.Response, err = future.GetResult(sender); err == nil && tr.Response.Response.StatusCode != http.StatusNoContent { + tr, err = client.CreateResponder(tr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TaskRunsCreateFuture", "Result", tr.Response.Response, "Failure responding to request") + } + } + return +} + // TaskRunsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type TaskRunsDeleteFuture struct { @@ -7330,6 +8237,33 @@ type TaskRunsDeleteFuture struct { Result func(TaskRunsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *TaskRunsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for TaskRunsDeleteFuture.Result. +func (future *TaskRunsDeleteFuture) result(client TaskRunsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TaskRunsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.TaskRunsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // TaskRunsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type TaskRunsUpdateFuture struct { @@ -7339,6 +8273,39 @@ type TaskRunsUpdateFuture struct { Result func(TaskRunsClient) (TaskRun, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *TaskRunsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for TaskRunsUpdateFuture.Result. 
+func (future *TaskRunsUpdateFuture) result(client TaskRunsClient) (tr TaskRun, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TaskRunsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.TaskRunsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if tr.Response.Response, err = future.GetResult(sender); err == nil && tr.Response.Response.StatusCode != http.StatusNoContent { + tr, err = client.UpdateResponder(tr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TaskRunsUpdateFuture", "Result", tr.Response.Response, "Failure responding to request") + } + } + return +} + // TaskRunUpdateParameters the parameters for updating a task run. type TaskRunUpdateParameters struct { // Identity - Identity for the resource. @@ -7428,6 +8395,39 @@ type TasksCreateFuture struct { Result func(TasksClient) (Task, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *TasksCreateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for TasksCreateFuture.Result. +func (future *TasksCreateFuture) result(client TasksClient) (t Task, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TasksCreateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.TasksCreateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if t.Response.Response, err = future.GetResult(sender); err == nil && t.Response.Response.StatusCode != http.StatusNoContent { + t, err = client.CreateResponder(t.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TasksCreateFuture", "Result", t.Response.Response, "Failure responding to request") + } + } + return +} + // TasksDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. type TasksDeleteFuture struct { azure.FutureAPI @@ -7436,6 +8436,33 @@ type TasksDeleteFuture struct { Result func(TasksClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *TasksDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for TasksDeleteFuture.Result. 
+func (future *TasksDeleteFuture) result(client TasksClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TasksDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.TasksDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // BasicTaskStepProperties base properties for any task step. type BasicTaskStepProperties interface { AsDockerBuildStep() (*DockerBuildStep, bool) @@ -7654,6 +8681,39 @@ type TasksUpdateFuture struct { Result func(TasksClient) (Task, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *TasksUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for TasksUpdateFuture.Result. +func (future *TasksUpdateFuture) result(client TasksClient) (t Task, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TasksUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.TasksUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if t.Response.Response, err = future.GetResult(sender); err == nil && t.Response.Response.StatusCode != http.StatusNoContent { + t, err = client.UpdateResponder(t.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TasksUpdateFuture", "Result", t.Response.Response, "Failure responding to request") + } + } + return +} + // TaskUpdateParameters the parameters for updating a task. type TaskUpdateParameters struct { // Identity - Identity for the resource. @@ -8091,6 +9151,39 @@ type TokensCreateFuture struct { Result func(TokensClient) (Token, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *TokensCreateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for TokensCreateFuture.Result. 
+func (future *TokensCreateFuture) result(client TokensClient) (t Token, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TokensCreateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.TokensCreateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if t.Response.Response, err = future.GetResult(sender); err == nil && t.Response.Response.StatusCode != http.StatusNoContent { + t, err = client.CreateResponder(t.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TokensCreateFuture", "Result", t.Response.Response, "Failure responding to request") + } + } + return +} + // TokensDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. type TokensDeleteFuture struct { azure.FutureAPI @@ -8099,6 +9192,33 @@ type TokensDeleteFuture struct { Result func(TokensClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *TokensDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for TokensDeleteFuture.Result. +func (future *TokensDeleteFuture) result(client TokensClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TokensDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.TokensDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // TokensUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. type TokensUpdateFuture struct { azure.FutureAPI @@ -8107,6 +9227,39 @@ type TokensUpdateFuture struct { Result func(TokensClient) (Token, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *TokensUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for TokensUpdateFuture.Result. 
+func (future *TokensUpdateFuture) result(client TokensClient) (t Token, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TokensUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.TokensUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if t.Response.Response, err = future.GetResult(sender); err == nil && t.Response.Response.StatusCode != http.StatusNoContent { + t, err = client.UpdateResponder(t.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TokensUpdateFuture", "Result", t.Response.Response, "Failure responding to request") + } + } + return +} + // TokenUpdateParameters the parameters for updating a token. type TokenUpdateParameters struct { // TokenUpdateProperties - The properties of the token update parameters. @@ -8644,6 +9797,39 @@ type WebhooksCreateFuture struct { Result func(WebhooksClient) (Webhook, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *WebhooksCreateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WebhooksCreateFuture.Result. +func (future *WebhooksCreateFuture) result(client WebhooksClient) (w Webhook, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksCreateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.WebhooksCreateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if w.Response.Response, err = future.GetResult(sender); err == nil && w.Response.Response.StatusCode != http.StatusNoContent { + w, err = client.CreateResponder(w.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksCreateFuture", "Result", w.Response.Response, "Failure responding to request") + } + } + return +} + // WebhooksDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type WebhooksDeleteFuture struct { @@ -8653,6 +9839,33 @@ type WebhooksDeleteFuture struct { Result func(WebhooksClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *WebhooksDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WebhooksDeleteFuture.Result. 
+func (future *WebhooksDeleteFuture) result(client WebhooksClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.WebhooksDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // WebhooksUpdateFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type WebhooksUpdateFuture struct { @@ -8662,6 +9875,39 @@ type WebhooksUpdateFuture struct { Result func(WebhooksClient) (Webhook, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *WebhooksUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for WebhooksUpdateFuture.Result. +func (future *WebhooksUpdateFuture) result(client WebhooksClient) (w Webhook, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerregistry.WebhooksUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if w.Response.Response, err = future.GetResult(sender); err == nil && w.Response.Response.StatusCode != http.StatusNoContent { + w, err = client.UpdateResponder(w.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksUpdateFuture", "Result", w.Response.Response, "Failure responding to request") + } + } + return +} + // WebhookUpdateParameters the parameters for updating a webhook. type WebhookUpdateParameters struct { // Tags - The tags for the webhook. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/operations.go index a5531da6e69..dd925b7a80a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/operations.go @@ -1,18 +1,7 @@ package containerregistry -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. 
+// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/pipelineruns.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/pipelineruns.go index 20e55624b38..634f06313ed 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/pipelineruns.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/pipelineruns.go @@ -1,18 +1,7 @@ package containerregistry -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -132,30 +121,7 @@ func (client PipelineRunsClient) CreateSender(req *http.Request) (future Pipelin var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client PipelineRunsClient) (pr PipelineRun, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.PipelineRunsCreateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.PipelineRunsCreateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - pr.Response.Response, err = future.GetResult(sender) - if pr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "containerregistry.PipelineRunsCreateFuture", "Result", nil, "received nil response and error") - } - if err == nil && pr.Response.Response.StatusCode != http.StatusNoContent { - pr, err = client.CreateResponder(pr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.PipelineRunsCreateFuture", "Result", pr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -249,20 +215,7 @@ func (client PipelineRunsClient) DeleteSender(req *http.Request) (future Pipelin var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client PipelineRunsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.PipelineRunsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.PipelineRunsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/privateendpointconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/privateendpointconnections.go index 10e592d934c..abde6ae5f72 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/privateendpointconnections.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/privateendpointconnections.go @@ -1,18 +1,7 @@ package containerregistry -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -121,30 +110,7 @@ func (client PrivateEndpointConnectionsClient) CreateOrUpdateSender(req *http.Re var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client PrivateEndpointConnectionsClient) (pec PrivateEndpointConnection, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.PrivateEndpointConnectionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.PrivateEndpointConnectionsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - pec.Response.Response, err = future.GetResult(sender) - if pec.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "containerregistry.PrivateEndpointConnectionsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && pec.Response.Response.StatusCode != http.StatusNoContent { - pec, err = client.CreateOrUpdateResponder(pec.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.PrivateEndpointConnectionsCreateOrUpdateFuture", "Result", pec.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -234,20 +200,7 @@ func (client PrivateEndpointConnectionsClient) DeleteSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client PrivateEndpointConnectionsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.PrivateEndpointConnectionsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.PrivateEndpointConnectionsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/registries.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/registries.go index a598ddc738b..76f5ee460e8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/registries.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/registries.go @@ -1,18 +1,7 @@ package containerregistry -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -210,30 +199,7 @@ func (client RegistriesClient) CreateSender(req *http.Request) (future Registrie var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RegistriesClient) (r Registry, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RegistriesCreateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.RegistriesCreateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - r.Response.Response, err = future.GetResult(sender) - if r.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "containerregistry.RegistriesCreateFuture", "Result", nil, "received nil response and error") - } - if err == nil && r.Response.Response.StatusCode != http.StatusNoContent { - r, err = client.CreateResponder(r.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RegistriesCreateFuture", "Result", r.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -321,20 +287,7 @@ func (client RegistriesClient) DeleteSender(req *http.Request) (future Registrie var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RegistriesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RegistriesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.RegistriesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -424,30 +377,7 @@ func (client RegistriesClient) GenerateCredentialsSender(req *http.Request) (fut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RegistriesClient) (gcr GenerateCredentialsResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RegistriesGenerateCredentialsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.RegistriesGenerateCredentialsFuture") - return - } - sender := autorest.DecorateSender(client, 
autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - gcr.Response.Response, err = future.GetResult(sender) - if gcr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "containerregistry.RegistriesGenerateCredentialsFuture", "Result", nil, "received nil response and error") - } - if err == nil && gcr.Response.Response.StatusCode != http.StatusNoContent { - gcr, err = client.GenerateCredentialsResponder(gcr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RegistriesGenerateCredentialsFuture", "Result", gcr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -716,20 +646,7 @@ func (client RegistriesClient) ImportImageSender(req *http.Request) (future Regi var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RegistriesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RegistriesImportImageFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.RegistriesImportImageFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1444,30 +1361,7 @@ func (client RegistriesClient) ScheduleRunSender(req *http.Request) (future Regi var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RegistriesClient) (r Run, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RegistriesScheduleRunFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.RegistriesScheduleRunFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - r.Response.Response, err = future.GetResult(sender) - if r.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "containerregistry.RegistriesScheduleRunFuture", "Result", nil, "received nil response and error") - } - if err == nil && r.Response.Response.StatusCode != http.StatusNoContent { - r, err = client.ScheduleRunResponder(r.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RegistriesScheduleRunFuture", "Result", r.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1558,30 +1452,7 @@ func (client RegistriesClient) UpdateSender(req *http.Request) (future Registrie var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RegistriesClient) (r Registry, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RegistriesUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.RegistriesUpdateFuture") - return - } - sender := autorest.DecorateSender(client, 
autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - r.Response.Response, err = future.GetResult(sender) - if r.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "containerregistry.RegistriesUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && r.Response.Response.StatusCode != http.StatusNoContent { - r, err = client.UpdateResponder(r.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RegistriesUpdateFuture", "Result", r.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/replications.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/replications.go index 782541c3d98..e4b7eef896a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/replications.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/replications.go @@ -1,18 +1,7 @@ package containerregistry -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -123,30 +112,7 @@ func (client ReplicationsClient) CreateSender(req *http.Request) (future Replica var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ReplicationsClient) (r Replication, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsCreateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.ReplicationsCreateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - r.Response.Response, err = future.GetResult(sender) - if r.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsCreateFuture", "Result", nil, "received nil response and error") - } - if err == nil && r.Response.Response.StatusCode != http.StatusNoContent { - r, err = client.CreateResponder(r.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsCreateFuture", "Result", r.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -240,20 +206,7 @@ func (client ReplicationsClient) DeleteSender(req *http.Request) (future Replica var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ReplicationsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.ReplicationsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -569,30 +522,7 @@ func (client ReplicationsClient) UpdateSender(req *http.Request) (future Replica var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ReplicationsClient) (r Replication, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.ReplicationsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - r.Response.Response, err = future.GetResult(sender) - if r.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && r.Response.Response.StatusCode != http.StatusNoContent { - r, err = client.UpdateResponder(r.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsUpdateFuture", "Result", r.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/runs.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/runs.go index 146afc390e9..3a732f676bd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/runs.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/runs.go @@ -1,18 +1,7 @@ package containerregistry -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -116,20 +105,7 @@ func (client RunsClient) CancelSender(req *http.Request) (future RunsCancelFutur var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RunsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RunsCancelFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.RunsCancelFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -534,30 +510,7 @@ func (client RunsClient) UpdateSender(req *http.Request) (future RunsUpdateFutur var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client RunsClient) (r Run, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RunsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.RunsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - r.Response.Response, err = future.GetResult(sender) - if r.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "containerregistry.RunsUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && r.Response.Response.StatusCode != http.StatusNoContent { - r, err = client.UpdateResponder(r.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RunsUpdateFuture", "Result", r.Response.Response, "Failure responding to request") - } - } - return - } + 
future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/scopemaps.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/scopemaps.go index aae949d69bf..741c768ba81 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/scopemaps.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/scopemaps.go @@ -1,18 +1,7 @@ package containerregistry -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -126,30 +115,7 @@ func (client ScopeMapsClient) CreateSender(req *http.Request) (future ScopeMapsC var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ScopeMapsClient) (sm ScopeMap, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ScopeMapsCreateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.ScopeMapsCreateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - sm.Response.Response, err = future.GetResult(sender) - if sm.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "containerregistry.ScopeMapsCreateFuture", "Result", nil, "received nil response and error") - } - if err == nil && sm.Response.Response.StatusCode != http.StatusNoContent { - sm, err = client.CreateResponder(sm.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ScopeMapsCreateFuture", "Result", sm.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -243,20 +209,7 @@ func (client ScopeMapsClient) DeleteSender(req *http.Request) (future ScopeMapsD var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ScopeMapsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ScopeMapsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = 
azure.NewAsyncOpIncompleteError("containerregistry.ScopeMapsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -572,30 +525,7 @@ func (client ScopeMapsClient) UpdateSender(req *http.Request) (future ScopeMapsU var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ScopeMapsClient) (sm ScopeMap, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ScopeMapsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.ScopeMapsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - sm.Response.Response, err = future.GetResult(sender) - if sm.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "containerregistry.ScopeMapsUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && sm.Response.Response.StatusCode != http.StatusNoContent { - sm, err = client.UpdateResponder(sm.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.ScopeMapsUpdateFuture", "Result", sm.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/taskruns.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/taskruns.go index b64339deab9..d1eb5ab299b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/taskruns.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/taskruns.go @@ -1,18 +1,7 @@ package containerregistry -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -123,30 +112,7 @@ func (client TaskRunsClient) CreateSender(req *http.Request) (future TaskRunsCre var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client TaskRunsClient) (tr TaskRun, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.TaskRunsCreateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.TaskRunsCreateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - tr.Response.Response, err = future.GetResult(sender) - if tr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "containerregistry.TaskRunsCreateFuture", "Result", nil, "received nil response and error") - } - if err == nil && tr.Response.Response.StatusCode != http.StatusNoContent { - tr, err = client.CreateResponder(tr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.TaskRunsCreateFuture", "Result", tr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -240,20 +206,7 @@ func (client TaskRunsClient) DeleteSender(req *http.Request) (future TaskRunsDel var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client TaskRunsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.TaskRunsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.TaskRunsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -661,30 +614,7 @@ func (client TaskRunsClient) UpdateSender(req *http.Request) (future TaskRunsUpd var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client TaskRunsClient) (tr TaskRun, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.TaskRunsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.TaskRunsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - tr.Response.Response, err = future.GetResult(sender) - if tr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "containerregistry.TaskRunsUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && tr.Response.Response.StatusCode != http.StatusNoContent { - tr, err = client.UpdateResponder(tr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.TaskRunsUpdateFuture", "Result", tr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/tasks.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/tasks.go index 9e421236638..a8e13820dc8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/tasks.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/tasks.go @@ -1,18 +1,7 @@ package containerregistry -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -134,30 +123,7 @@ func (client TasksClient) CreateSender(req *http.Request) (future TasksCreateFut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client TasksClient) (t Task, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.TasksCreateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.TasksCreateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - t.Response.Response, err = future.GetResult(sender) - if t.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "containerregistry.TasksCreateFuture", "Result", nil, "received nil response and error") - } - if err == nil && t.Response.Response.StatusCode != http.StatusNoContent { - t, err = client.CreateResponder(t.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.TasksCreateFuture", "Result", t.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -251,20 +217,7 @@ func (client TasksClient) DeleteSender(req *http.Request) (future TasksDeleteFut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client TasksClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.TasksDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.TasksDeleteFuture") - return - } - ar.Response = future.Response() - return - } 
+ future.Result = future.result return } @@ -672,30 +625,7 @@ func (client TasksClient) UpdateSender(req *http.Request) (future TasksUpdateFut var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client TasksClient) (t Task, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.TasksUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.TasksUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - t.Response.Response, err = future.GetResult(sender) - if t.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "containerregistry.TasksUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && t.Response.Response.StatusCode != http.StatusNoContent { - t, err = client.UpdateResponder(t.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.TasksUpdateFuture", "Result", t.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/tokens.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/tokens.go index 70917a48cc4..b67b600cebe 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/tokens.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/tokens.go @@ -1,18 +1,7 @@ package containerregistry -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -123,30 +112,7 @@ func (client TokensClient) CreateSender(req *http.Request) (future TokensCreateF var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client TokensClient) (t Token, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.TokensCreateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.TokensCreateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - t.Response.Response, err = future.GetResult(sender) - if t.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "containerregistry.TokensCreateFuture", "Result", nil, "received nil response and error") - } - if err == nil && t.Response.Response.StatusCode != http.StatusNoContent { - t, err = client.CreateResponder(t.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.TokensCreateFuture", "Result", t.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -240,20 +206,7 @@ func (client TokensClient) DeleteSender(req *http.Request) (future TokensDeleteF var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client TokensClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.TokensDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.TokensDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -569,30 +522,7 @@ func (client TokensClient) UpdateSender(req *http.Request) (future TokensUpdateF var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client TokensClient) (t Token, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.TokensUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.TokensUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - t.Response.Response, err = future.GetResult(sender) - if t.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "containerregistry.TokensUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && t.Response.Response.StatusCode != http.StatusNoContent { - t, err = client.UpdateResponder(t.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.TokensUpdateFuture", "Result", t.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/version.go 
b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/version.go index b50f69e2f48..a4ef593a99e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/version.go @@ -2,19 +2,8 @@ package containerregistry import "github.com/Azure/azure-sdk-for-go/version" -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/webhooks.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/webhooks.go index fa7338b12aa..85d464fb844 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/webhooks.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry/webhooks.go @@ -1,18 +1,7 @@ package containerregistry -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -129,30 +118,7 @@ func (client WebhooksClient) CreateSender(req *http.Request) (future WebhooksCre var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WebhooksClient) (w Webhook, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.WebhooksCreateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.WebhooksCreateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - w.Response.Response, err = future.GetResult(sender) - if w.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "containerregistry.WebhooksCreateFuture", "Result", nil, "received nil response and error") - } - if err == nil && w.Response.Response.StatusCode != http.StatusNoContent { - w, err = client.CreateResponder(w.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.WebhooksCreateFuture", "Result", w.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -246,20 +212,7 @@ func (client WebhooksClient) DeleteSender(req *http.Request) (future WebhooksDel var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WebhooksClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.WebhooksDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.WebhooksDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -893,30 +846,7 @@ func (client WebhooksClient) UpdateSender(req *http.Request) (future WebhooksUpd var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client WebhooksClient) (w Webhook, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.WebhooksUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("containerregistry.WebhooksUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - w.Response.Response, err = future.GetResult(sender) - if w.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "containerregistry.WebhooksUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && w.Response.Response.StatusCode != http.StatusNoContent { - w, err = client.UpdateResponder(w.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.WebhooksUpdateFuture", "Result", w.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/CHANGELOG.md index 8bcbffb42e4..33c171381bb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/CHANGELOG.md @@ -1,5 +1,5 @@ Generated from https://github.com/Azure/azure-rest-api-specs/tree/3c764635e7d442b3e74caf593029fcd440b3ef82//specification/monitor/resource-manager/readme.md tag: `package-2018-03` -Code generator @microsoft.azure/autorest.go@2.1.175 +Code generator @microsoft.azure/autorest.go@2.1.178 diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/_meta.json new file mode 100644 index 00000000000..2ae7275e8af --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/_meta.json @@ -0,0 +1,10 @@ +{ + "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", + "readme": "/_/azure-rest-api-specs/specification/monitor/resource-manager/readme.md", + "tag": "package-2018-03", + "use": "@microsoft.azure/autorest.go@2.1.178", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.178 --go --verbose --go-sdk-folder=/_/azure-sdk-for-go --tag=package-2018-03 --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/monitor/resource-manager/readme.md", + "additional_properties": { + "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION" + } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/actiongroups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/actiongroups.go index e9386d3f894..43753cb5b29 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/actiongroups.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/actiongroups.go @@ -1,18 +1,7 @@ package insights -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/activitylogalerts.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/activitylogalerts.go index feec0c05d94..44aaf3ef1e4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/activitylogalerts.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/activitylogalerts.go @@ -1,18 +1,7 @@ package insights -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/activitylogs.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/activitylogs.go index e1ddd8474d1..16dd215e6ad 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/activitylogs.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/activitylogs.go @@ -1,18 +1,7 @@ package insights -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/alertruleincidents.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/alertruleincidents.go index dd854793d47..c3bf26828d8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/alertruleincidents.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/alertruleincidents.go @@ -1,18 +1,7 @@ package insights -// Copyright (c) Microsoft and contributors. 
All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/alertrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/alertrules.go index 5fcade2cdaa..f5a2ab68238 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/alertrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/alertrules.go @@ -1,18 +1,7 @@ package insights -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/autoscalesettings.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/autoscalesettings.go index e3e0fe9d28e..86f253d0fb8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/autoscalesettings.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/autoscalesettings.go @@ -1,18 +1,7 @@ package insights -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. 
+// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/client.go index 557e8413f01..a777181b078 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/client.go @@ -3,19 +3,8 @@ // Monitor Management Client package insights -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/diagnosticsettings.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/diagnosticsettings.go index baaf0d2976e..9708c52c249 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/diagnosticsettings.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/diagnosticsettings.go @@ -1,18 +1,7 @@ package insights -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/diagnosticsettingscategory.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/diagnosticsettingscategory.go index 841ec82d6ad..48365ed67e6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/diagnosticsettingscategory.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/diagnosticsettingscategory.go @@ -1,18 +1,7 @@ package insights -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/enums.go index 7e299940e74..1bab3243cf6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/enums.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/enums.go @@ -1,18 +1,7 @@ package insights -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/eventcategories.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/eventcategories.go index 0069d84da2c..86e59b42436 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/eventcategories.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/eventcategories.go @@ -1,18 +1,7 @@ package insights -// Copyright (c) Microsoft and contributors. 
All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/logprofiles.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/logprofiles.go index 5074f5435e9..ad1500b87bc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/logprofiles.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/logprofiles.go @@ -1,18 +1,7 @@ package insights -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricalerts.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricalerts.go index fe0efc42781..94231b6fa34 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricalerts.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricalerts.go @@ -1,18 +1,7 @@ package insights -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. 
+// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricalertsstatus.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricalertsstatus.go index a757c23ae4c..43cfaed4276 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricalertsstatus.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricalertsstatus.go @@ -1,18 +1,7 @@ package insights -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricbaseline.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricbaseline.go index fb9c7d1e572..d395fc67bc5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricbaseline.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricbaseline.go @@ -1,18 +1,7 @@ package insights -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricdefinitions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricdefinitions.go index 1d0a872599f..69fc2e0b465 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricdefinitions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricdefinitions.go @@ -1,18 +1,7 @@ package insights -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metrics.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metrics.go index 352bd7b4cae..97687a09b49 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metrics.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metrics.go @@ -1,18 +1,7 @@ package insights -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/models.go index 88d2493db97..fa54ccbf93b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/models.go @@ -1,18 +1,7 @@ package insights -// Copyright (c) Microsoft and contributors. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/operations.go index 1943bd686cb..3f82351c7f2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/operations.go @@ -1,18 +1,7 @@ package insights -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/scheduledqueryrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/scheduledqueryrules.go index 677442ab45b..c9d43964ecb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/scheduledqueryrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/scheduledqueryrules.go @@ -1,18 +1,7 @@ package insights -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. 
+// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/tenantactivitylogs.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/tenantactivitylogs.go index 47679c79861..7501f047bb3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/tenantactivitylogs.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/tenantactivitylogs.go @@ -1,18 +1,7 @@ package insights -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/version.go index dbd91009bbf..f18278031f7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/version.go @@ -2,19 +2,8 @@ package insights import "github.com/Azure/azure-sdk-for-go/version" -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/CHANGELOG.md index d6ac227c2d8..4fd87c505ec 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/CHANGELOG.md @@ -1,5 +1,13 @@ Generated from https://github.com/Azure/azure-rest-api-specs/tree/3c764635e7d442b3e74caf593029fcd440b3ef82//specification/privatedns/resource-manager/readme.md tag: `package-2018-09` -Code generator @microsoft.azure/autorest.go@2.1.175 +Code generator @microsoft.azure/autorest.go@2.1.178 +### New Funcs + +1. *PrivateZonesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *PrivateZonesDeleteFuture.UnmarshalJSON([]byte) error +1. *PrivateZonesUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkLinksCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkLinksDeleteFuture.UnmarshalJSON([]byte) error +1. *VirtualNetworkLinksUpdateFuture.UnmarshalJSON([]byte) error diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/_meta.json new file mode 100644 index 00000000000..6836e63a6a3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/_meta.json @@ -0,0 +1,10 @@ +{ + "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", + "readme": "/_/azure-rest-api-specs/specification/privatedns/resource-manager/readme.md", + "tag": "package-2018-09", + "use": "@microsoft.azure/autorest.go@2.1.178", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.178 --go --verbose --go-sdk-folder=/_/azure-sdk-for-go --tag=package-2018-09 --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/privatedns/resource-manager/readme.md", + "additional_properties": { + "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION" + } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/client.go index 25bc56eb9c6..48bd23da58c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/client.go @@ -3,19 +3,8 @@ // The Private DNS Management Client. package privatedns -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/enums.go index 23e530ee2c0..a4cbf2a65ea 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/enums.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/enums.go @@ -1,18 +1,7 @@ package privatedns -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/models.go index 1eb3c738150..ca17bb080c1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/models.go @@ -1,18 +1,7 @@ package privatedns -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -384,6 +373,39 @@ type PrivateZonesCreateOrUpdateFuture struct { Result func(PrivateZonesClient) (PrivateZone, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *PrivateZonesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PrivateZonesCreateOrUpdateFuture.Result. +func (future *PrivateZonesCreateOrUpdateFuture) result(client PrivateZonesClient) (pz PrivateZone, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("privatedns.PrivateZonesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if pz.Response.Response, err = future.GetResult(sender); err == nil && pz.Response.Response.StatusCode != http.StatusNoContent { + pz, err = client.CreateOrUpdateResponder(pz.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesCreateOrUpdateFuture", "Result", pz.Response.Response, "Failure responding to request") + } + } + return +} + // PrivateZonesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type PrivateZonesDeleteFuture struct { @@ -393,6 +415,33 @@ type PrivateZonesDeleteFuture struct { Result func(PrivateZonesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PrivateZonesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PrivateZonesDeleteFuture.Result. +func (future *PrivateZonesDeleteFuture) result(client PrivateZonesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("privatedns.PrivateZonesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // PrivateZonesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type PrivateZonesUpdateFuture struct { @@ -402,6 +451,39 @@ type PrivateZonesUpdateFuture struct { Result func(PrivateZonesClient) (PrivateZone, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *PrivateZonesUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for PrivateZonesUpdateFuture.Result. 
+func (future *PrivateZonesUpdateFuture) result(client PrivateZonesClient) (pz PrivateZone, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("privatedns.PrivateZonesUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if pz.Response.Response, err = future.GetResult(sender); err == nil && pz.Response.Response.StatusCode != http.StatusNoContent { + pz, err = client.UpdateResponder(pz.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesUpdateFuture", "Result", pz.Response.Response, "Failure responding to request") + } + } + return +} + // ProxyResource the resource model definition for an ARM proxy resource. type ProxyResource struct { // ID - READ-ONLY; Fully qualified resource Id for the resource. Example - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateDnsZoneName}'. @@ -1132,6 +1214,39 @@ type VirtualNetworkLinksCreateOrUpdateFuture struct { Result func(VirtualNetworkLinksClient) (VirtualNetworkLink, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkLinksCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkLinksCreateOrUpdateFuture.Result. +func (future *VirtualNetworkLinksCreateOrUpdateFuture) result(client VirtualNetworkLinksClient) (vnl VirtualNetworkLink, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("privatedns.VirtualNetworkLinksCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vnl.Response.Response, err = future.GetResult(sender); err == nil && vnl.Response.Response.StatusCode != http.StatusNoContent { + vnl, err = client.CreateOrUpdateResponder(vnl.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksCreateOrUpdateFuture", "Result", vnl.Response.Response, "Failure responding to request") + } + } + return +} + // VirtualNetworkLinksDeleteFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualNetworkLinksDeleteFuture struct { @@ -1141,6 +1256,33 @@ type VirtualNetworkLinksDeleteFuture struct { Result func(VirtualNetworkLinksClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *VirtualNetworkLinksDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkLinksDeleteFuture.Result. +func (future *VirtualNetworkLinksDeleteFuture) result(client VirtualNetworkLinksClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("privatedns.VirtualNetworkLinksDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualNetworkLinksUpdateFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type VirtualNetworkLinksUpdateFuture struct { @@ -1149,3 +1291,36 @@ type VirtualNetworkLinksUpdateFuture struct { // If the operation has not completed it will return an error. Result func(VirtualNetworkLinksClient) (VirtualNetworkLink, error) } + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *VirtualNetworkLinksUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for VirtualNetworkLinksUpdateFuture.Result. +func (future *VirtualNetworkLinksUpdateFuture) result(client VirtualNetworkLinksClient) (vnl VirtualNetworkLink, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("privatedns.VirtualNetworkLinksUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vnl.Response.Response, err = future.GetResult(sender); err == nil && vnl.Response.Response.StatusCode != http.StatusNoContent { + vnl, err = client.UpdateResponder(vnl.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksUpdateFuture", "Result", vnl.Response.Response, "Failure responding to request") + } + } + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/privatezones.go b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/privatezones.go index 858f0ce1c1b..2e10b275cd2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/privatezones.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/privatezones.go @@ -1,18 +1,7 @@ package privatedns -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -119,30 +108,7 @@ func (client PrivateZonesClient) CreateOrUpdateSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client PrivateZonesClient) (pz PrivateZone, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("privatedns.PrivateZonesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - pz.Response.Response, err = future.GetResult(sender) - if pz.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && pz.Response.Response.StatusCode != http.StatusNoContent { - pz, err = client.CreateOrUpdateResponder(pz.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesCreateOrUpdateFuture", "Result", pz.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -227,20 +193,7 @@ func (client PrivateZonesClient) DeleteSender(req *http.Request) (future Private var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client PrivateZonesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("privatedns.PrivateZonesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -641,30 +594,7 @@ func (client PrivateZonesClient) UpdateSender(req *http.Request) (future Private var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client PrivateZonesClient) (pz PrivateZone, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("privatedns.PrivateZonesUpdateFuture") - return - } - sender := autorest.DecorateSender(client, 
autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - pz.Response.Response, err = future.GetResult(sender) - if pz.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && pz.Response.Response.StatusCode != http.StatusNoContent { - pz, err = client.UpdateResponder(pz.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesUpdateFuture", "Result", pz.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/recordsets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/recordsets.go index e9db2397914..2c863316f27 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/recordsets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/recordsets.go @@ -1,18 +1,7 @@ package privatedns -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/version.go index 61033fc4c09..32bd8ba132e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/version.go @@ -2,19 +2,8 @@ package privatedns import "github.com/Azure/azure-sdk-for-go/version" -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
// // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/virtualnetworklinks.go b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/virtualnetworklinks.go index 5c5007ee322..e85b36982e0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/virtualnetworklinks.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/virtualnetworklinks.go @@ -1,18 +1,7 @@ package privatedns -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -122,30 +111,7 @@ func (client VirtualNetworkLinksClient) CreateOrUpdateSender(req *http.Request) var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkLinksClient) (vnl VirtualNetworkLink, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("privatedns.VirtualNetworkLinksCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vnl.Response.Response, err = future.GetResult(sender) - if vnl.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vnl.Response.Response.StatusCode != http.StatusNoContent { - vnl, err = client.CreateOrUpdateResponder(vnl.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksCreateOrUpdateFuture", "Result", vnl.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -233,20 +199,7 @@ func (client VirtualNetworkLinksClient) DeleteSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkLinksClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, 
"privatedns.VirtualNetworkLinksDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("privatedns.VirtualNetworkLinksDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -536,30 +489,7 @@ func (client VirtualNetworkLinksClient) UpdateSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client VirtualNetworkLinksClient) (vnl VirtualNetworkLink, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("privatedns.VirtualNetworkLinksUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - vnl.Response.Response, err = future.GetResult(sender) - if vnl.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && vnl.Response.Response.StatusCode != http.StatusNoContent { - vnl, err = client.UpdateResponder(vnl.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksUpdateFuture", "Result", vnl.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-12-01/features/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-12-01/features/CHANGELOG.md index b91d30f136a..56770509a9c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-12-01/features/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-12-01/features/CHANGELOG.md @@ -1,5 +1,5 @@ Generated from https://github.com/Azure/azure-rest-api-specs/tree/3c764635e7d442b3e74caf593029fcd440b3ef82//specification/resources/resource-manager/readme.md tag: `package-features-2015-12` -Code generator @microsoft.azure/autorest.go@2.1.175 +Code generator @microsoft.azure/autorest.go@2.1.178 diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-12-01/features/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-12-01/features/_meta.json new file mode 100644 index 00000000000..bc98b8db963 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-12-01/features/_meta.json @@ -0,0 +1,10 @@ +{ + "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", + "readme": "/_/azure-rest-api-specs/specification/resources/resource-manager/readme.md", + "tag": "package-features-2015-12", + "use": "@microsoft.azure/autorest.go@2.1.178", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.178 --go --verbose --go-sdk-folder=/_/azure-sdk-for-go --tag=package-features-2015-12 --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/resources/resource-manager/readme.md", + "additional_properties": { + "additional_options": "--go --verbose --use-onever --version=V2 
--go.license-header=MICROSOFT_MIT_NO_VERSION" + } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-12-01/features/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-12-01/features/client.go index 68c28ccd222..511547962d3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-12-01/features/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-12-01/features/client.go @@ -6,19 +6,8 @@ // functionality. package features -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-12-01/features/features.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-12-01/features/features.go index 6645af20980..42088f87ad9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-12-01/features/features.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-12-01/features/features.go @@ -1,18 +1,7 @@ package features -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-12-01/features/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-12-01/features/models.go index df5b4528a07..d5b01ec0053 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-12-01/features/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-12-01/features/models.go @@ -1,18 +1,7 @@ package features -// Copyright (c) Microsoft and contributors. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-12-01/features/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-12-01/features/version.go index 97ea7225aaf..382caa0ee96 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-12-01/features/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-12-01/features/version.go @@ -2,19 +2,8 @@ package features import "github.com/Azure/azure-sdk-for-go/version" -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/CHANGELOG.md index 5219f3ed9da..2a35dd4a14b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/CHANGELOG.md @@ -1,5 +1,22 @@ Generated from https://github.com/Azure/azure-rest-api-specs/tree/3c764635e7d442b3e74caf593029fcd440b3ef82//specification/resources/resource-manager/readme.md tag: `package-resources-2019-05` -Code generator @microsoft.azure/autorest.go@2.1.175 +Code generator @microsoft.azure/autorest.go@2.1.178 +### New Funcs + +1. *CreateOrUpdateByIDFuture.UnmarshalJSON([]byte) error +1. *CreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *DeleteByIDFuture.UnmarshalJSON([]byte) error +1. *DeleteFuture.UnmarshalJSON([]byte) error +1. *DeploymentsCreateOrUpdateAtManagementGroupScopeFuture.UnmarshalJSON([]byte) error +1. *DeploymentsCreateOrUpdateAtSubscriptionScopeFuture.UnmarshalJSON([]byte) error +1. 
*DeploymentsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *DeploymentsDeleteAtManagementGroupScopeFuture.UnmarshalJSON([]byte) error +1. *DeploymentsDeleteAtSubscriptionScopeFuture.UnmarshalJSON([]byte) error +1. *DeploymentsDeleteFuture.UnmarshalJSON([]byte) error +1. *GroupsDeleteFuture.UnmarshalJSON([]byte) error +1. *MoveResourcesFuture.UnmarshalJSON([]byte) error +1. *UpdateByIDFuture.UnmarshalJSON([]byte) error +1. *UpdateFuture.UnmarshalJSON([]byte) error +1. *ValidateMoveResourcesFuture.UnmarshalJSON([]byte) error diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/_meta.json new file mode 100644 index 00000000000..43f6e94ed68 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/_meta.json @@ -0,0 +1,10 @@ +{ + "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", + "readme": "/_/azure-rest-api-specs/specification/resources/resource-manager/readme.md", + "tag": "package-resources-2019-05", + "use": "@microsoft.azure/autorest.go@2.1.178", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.178 --go --verbose --go-sdk-folder=/_/azure-sdk-for-go --tag=package-resources-2019-05 --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/resources/resource-manager/readme.md", + "additional_properties": { + "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION" + } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/client.go index ab582ee0369..242ed156b29 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/client.go @@ -3,19 +3,8 @@ // Provides operations for working with resources and resource groups. package resources -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/deploymentoperations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/deploymentoperations.go index d274be75bbe..41091a402c0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/deploymentoperations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/deploymentoperations.go @@ -1,18 +1,7 @@ package resources -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/deployments.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/deployments.go index 6a4679d5898..b4a6cd91c32 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/deployments.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/deployments.go @@ -1,18 +1,7 @@ package resources -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -711,30 +700,7 @@ func (client DeploymentsClient) CreateOrUpdateSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DeploymentsClient) (de DeploymentExtended, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "resources.DeploymentsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("resources.DeploymentsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - de.Response.Response, err = future.GetResult(sender) - if de.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "resources.DeploymentsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && de.Response.Response.StatusCode != http.StatusNoContent { - de, err = client.CreateOrUpdateResponder(de.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "resources.DeploymentsCreateOrUpdateFuture", "Result", de.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -833,30 +799,7 @@ func (client DeploymentsClient) CreateOrUpdateAtManagementGroupScopeSender(req * var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DeploymentsClient) (de DeploymentExtended, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "resources.DeploymentsCreateOrUpdateAtManagementGroupScopeFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("resources.DeploymentsCreateOrUpdateAtManagementGroupScopeFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - de.Response.Response, err = future.GetResult(sender) - if de.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "resources.DeploymentsCreateOrUpdateAtManagementGroupScopeFuture", "Result", nil, "received nil response and error") - } - if err == nil && de.Response.Response.StatusCode != http.StatusNoContent { - de, err = client.CreateOrUpdateAtManagementGroupScopeResponder(de.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "resources.DeploymentsCreateOrUpdateAtManagementGroupScopeFuture", "Result", de.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -951,30 +894,7 @@ func (client DeploymentsClient) CreateOrUpdateAtSubscriptionScopeSender(req *htt var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DeploymentsClient) (de DeploymentExtended, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "resources.DeploymentsCreateOrUpdateAtSubscriptionScopeFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = 
azure.NewAsyncOpIncompleteError("resources.DeploymentsCreateOrUpdateAtSubscriptionScopeFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - de.Response.Response, err = future.GetResult(sender) - if de.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "resources.DeploymentsCreateOrUpdateAtSubscriptionScopeFuture", "Result", nil, "received nil response and error") - } - if err == nil && de.Response.Response.StatusCode != http.StatusNoContent { - de, err = client.CreateOrUpdateAtSubscriptionScopeResponder(de.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "resources.DeploymentsCreateOrUpdateAtSubscriptionScopeFuture", "Result", de.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1071,20 +991,7 @@ func (client DeploymentsClient) DeleteSender(req *http.Request) (future Deployme var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DeploymentsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "resources.DeploymentsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("resources.DeploymentsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1176,20 +1083,7 @@ func (client DeploymentsClient) DeleteAtManagementGroupScopeSender(req *http.Req var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DeploymentsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "resources.DeploymentsDeleteAtManagementGroupScopeFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("resources.DeploymentsDeleteAtManagementGroupScopeFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1277,20 +1171,7 @@ func (client DeploymentsClient) DeleteAtSubscriptionScopeSender(req *http.Reques var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DeploymentsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "resources.DeploymentsDeleteAtSubscriptionScopeFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("resources.DeploymentsDeleteAtSubscriptionScopeFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/enums.go index ddced0a6a23..90b8ab23b96 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/enums.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/enums.go @@ -1,18 +1,7 @@ package resources -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/groups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/groups.go index 69518c72135..69548e9ee34 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/groups.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/groups.go @@ -1,18 +1,7 @@ package resources -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -284,20 +273,7 @@ func (client GroupsClient) DeleteSender(req *http.Request) (future GroupsDeleteF var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client GroupsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "resources.GroupsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("resources.GroupsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/models.go index b531df08927..1178f2c5478 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/models.go @@ -1,18 +1,7 @@ package resources -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -71,6 +60,39 @@ type CreateOrUpdateByIDFuture struct { Result func(Client) (GenericResource, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *CreateOrUpdateByIDFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for CreateOrUpdateByIDFuture.Result. 
+func (future *CreateOrUpdateByIDFuture) result(client Client) (gr GenericResource, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.CreateOrUpdateByIDFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.CreateOrUpdateByIDFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gr.Response.Response, err = future.GetResult(sender); err == nil && gr.Response.Response.StatusCode != http.StatusNoContent { + gr, err = client.CreateOrUpdateByIDResponder(gr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.CreateOrUpdateByIDFuture", "Result", gr.Response.Response, "Failure responding to request") + } + } + return +} + // CreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type CreateOrUpdateFuture struct { @@ -80,6 +102,39 @@ type CreateOrUpdateFuture struct { Result func(Client) (GenericResource, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *CreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for CreateOrUpdateFuture.Result. +func (future *CreateOrUpdateFuture) result(client Client) (gr GenericResource, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.CreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.CreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gr.Response.Response, err = future.GetResult(sender); err == nil && gr.Response.Response.StatusCode != http.StatusNoContent { + gr, err = client.CreateOrUpdateResponder(gr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.CreateOrUpdateFuture", "Result", gr.Response.Response, "Failure responding to request") + } + } + return +} + // DebugSetting the debug setting. type DebugSetting struct { // DetailLevel - Specifies the type of information to log for debugging. The permitted values are none, requestContent, responseContent, or both requestContent and responseContent separated by a comma. The default is none. When setting this value, carefully consider the type of information you are passing in during deployment. By logging information about the request or response, you could potentially expose sensitive data that is retrieved through the deployment operations. @@ -94,6 +149,33 @@ type DeleteByIDFuture struct { Result func(Client) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *DeleteByIDFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DeleteByIDFuture.Result. +func (future *DeleteByIDFuture) result(client Client) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeleteByIDFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.DeleteByIDFuture") + return + } + ar.Response = future.Response() + return +} + // DeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. type DeleteFuture struct { azure.FutureAPI @@ -102,6 +184,33 @@ type DeleteFuture struct { Result func(Client) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DeleteFuture.Result. +func (future *DeleteFuture) result(client Client) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.DeleteFuture") + return + } + ar.Response = future.Response() + return +} + // Dependency deployment dependency information. type Dependency struct { // DependsOn - The list of dependencies. @@ -636,6 +745,39 @@ type DeploymentsCreateOrUpdateAtManagementGroupScopeFuture struct { Result func(DeploymentsClient) (DeploymentExtended, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DeploymentsCreateOrUpdateAtManagementGroupScopeFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DeploymentsCreateOrUpdateAtManagementGroupScopeFuture.Result. 
+func (future *DeploymentsCreateOrUpdateAtManagementGroupScopeFuture) result(client DeploymentsClient) (de DeploymentExtended, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsCreateOrUpdateAtManagementGroupScopeFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.DeploymentsCreateOrUpdateAtManagementGroupScopeFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if de.Response.Response, err = future.GetResult(sender); err == nil && de.Response.Response.StatusCode != http.StatusNoContent { + de, err = client.CreateOrUpdateAtManagementGroupScopeResponder(de.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsCreateOrUpdateAtManagementGroupScopeFuture", "Result", de.Response.Response, "Failure responding to request") + } + } + return +} + // DeploymentsCreateOrUpdateAtSubscriptionScopeFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type DeploymentsCreateOrUpdateAtSubscriptionScopeFuture struct { @@ -645,6 +787,39 @@ type DeploymentsCreateOrUpdateAtSubscriptionScopeFuture struct { Result func(DeploymentsClient) (DeploymentExtended, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DeploymentsCreateOrUpdateAtSubscriptionScopeFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DeploymentsCreateOrUpdateAtSubscriptionScopeFuture.Result. +func (future *DeploymentsCreateOrUpdateAtSubscriptionScopeFuture) result(client DeploymentsClient) (de DeploymentExtended, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsCreateOrUpdateAtSubscriptionScopeFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.DeploymentsCreateOrUpdateAtSubscriptionScopeFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if de.Response.Response, err = future.GetResult(sender); err == nil && de.Response.Response.StatusCode != http.StatusNoContent { + de, err = client.CreateOrUpdateAtSubscriptionScopeResponder(de.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsCreateOrUpdateAtSubscriptionScopeFuture", "Result", de.Response.Response, "Failure responding to request") + } + } + return +} + // DeploymentsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type DeploymentsCreateOrUpdateFuture struct { @@ -654,6 +829,39 @@ type DeploymentsCreateOrUpdateFuture struct { Result func(DeploymentsClient) (DeploymentExtended, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *DeploymentsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DeploymentsCreateOrUpdateFuture.Result. +func (future *DeploymentsCreateOrUpdateFuture) result(client DeploymentsClient) (de DeploymentExtended, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.DeploymentsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if de.Response.Response, err = future.GetResult(sender); err == nil && de.Response.Response.StatusCode != http.StatusNoContent { + de, err = client.CreateOrUpdateResponder(de.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsCreateOrUpdateFuture", "Result", de.Response.Response, "Failure responding to request") + } + } + return +} + // DeploymentsDeleteAtManagementGroupScopeFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type DeploymentsDeleteAtManagementGroupScopeFuture struct { @@ -663,6 +871,33 @@ type DeploymentsDeleteAtManagementGroupScopeFuture struct { Result func(DeploymentsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DeploymentsDeleteAtManagementGroupScopeFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DeploymentsDeleteAtManagementGroupScopeFuture.Result. +func (future *DeploymentsDeleteAtManagementGroupScopeFuture) result(client DeploymentsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsDeleteAtManagementGroupScopeFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.DeploymentsDeleteAtManagementGroupScopeFuture") + return + } + ar.Response = future.Response() + return +} + // DeploymentsDeleteAtSubscriptionScopeFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type DeploymentsDeleteAtSubscriptionScopeFuture struct { @@ -672,6 +907,33 @@ type DeploymentsDeleteAtSubscriptionScopeFuture struct { Result func(DeploymentsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DeploymentsDeleteAtSubscriptionScopeFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DeploymentsDeleteAtSubscriptionScopeFuture.Result. 
+func (future *DeploymentsDeleteAtSubscriptionScopeFuture) result(client DeploymentsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsDeleteAtSubscriptionScopeFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.DeploymentsDeleteAtSubscriptionScopeFuture") + return + } + ar.Response = future.Response() + return +} + // DeploymentsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type DeploymentsDeleteFuture struct { @@ -681,6 +943,33 @@ type DeploymentsDeleteFuture struct { Result func(DeploymentsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DeploymentsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DeploymentsDeleteFuture.Result. +func (future *DeploymentsDeleteFuture) result(client DeploymentsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.DeploymentsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // DeploymentValidateResult information from validate template deployment response. type DeploymentValidateResult struct { autorest.Response `json:"-"` @@ -1116,6 +1405,33 @@ type GroupsDeleteFuture struct { Result func(GroupsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GroupsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GroupsDeleteFuture.Result. +func (future *GroupsDeleteFuture) result(client GroupsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.GroupsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // HTTPMessage HTTP message. type HTTPMessage struct { // Content - HTTP message content. @@ -1351,6 +1667,33 @@ type MoveResourcesFuture struct { Result func(Client) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *MoveResourcesFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for MoveResourcesFuture.Result. 
+func (future *MoveResourcesFuture) result(client Client) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.MoveResourcesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.MoveResourcesFuture") + return + } + ar.Response = future.Response() + return +} + // OnErrorDeployment deployment on error behavior. type OnErrorDeployment struct { // Type - The deployment on error behavior type. Possible values are LastSuccessful and SpecificDeployment. Possible values include: 'LastSuccessful', 'SpecificDeployment' @@ -2140,6 +2483,39 @@ type UpdateByIDFuture struct { Result func(Client) (GenericResource, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *UpdateByIDFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for UpdateByIDFuture.Result. +func (future *UpdateByIDFuture) result(client Client) (gr GenericResource, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.UpdateByIDFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.UpdateByIDFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gr.Response.Response, err = future.GetResult(sender); err == nil && gr.Response.Response.StatusCode != http.StatusNoContent { + gr, err = client.UpdateByIDResponder(gr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.UpdateByIDFuture", "Result", gr.Response.Response, "Failure responding to request") + } + } + return +} + // UpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. type UpdateFuture struct { azure.FutureAPI @@ -2148,6 +2524,39 @@ type UpdateFuture struct { Result func(Client) (GenericResource, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *UpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for UpdateFuture.Result. 
+func (future *UpdateFuture) result(client Client) (gr GenericResource, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.UpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.UpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gr.Response.Response, err = future.GetResult(sender); err == nil && gr.Response.Response.StatusCode != http.StatusNoContent { + gr, err = client.UpdateResponder(gr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.UpdateFuture", "Result", gr.Response.Response, "Failure responding to request") + } + } + return +} + // ValidateMoveResourcesFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type ValidateMoveResourcesFuture struct { @@ -2156,3 +2565,30 @@ type ValidateMoveResourcesFuture struct { // If the operation has not completed it will return an error. Result func(Client) (autorest.Response, error) } + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ValidateMoveResourcesFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ValidateMoveResourcesFuture.Result. +func (future *ValidateMoveResourcesFuture) result(client Client) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ValidateMoveResourcesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.ValidateMoveResourcesFuture") + return + } + ar.Response = future.Response() + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/operations.go index eacd51d58b2..d534dcdab49 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/operations.go @@ -1,18 +1,7 @@ package resources -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. 
// Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/providers.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/providers.go index aa6ec9de822..7d8e6aec626 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/providers.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/providers.go @@ -1,18 +1,7 @@ package resources -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/resources.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/resources.go index be42e11d3ed..44d54bdc606 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/resources.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/resources.go @@ -1,18 +1,7 @@ package resources -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -288,30 +277,7 @@ func (client Client) CreateOrUpdateSender(req *http.Request) (future CreateOrUpd var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client Client) (gr GenericResource, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "resources.CreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("resources.CreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - gr.Response.Response, err = future.GetResult(sender) - if gr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "resources.CreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && gr.Response.Response.StatusCode != http.StatusNoContent { - gr, err = client.CreateOrUpdateResponder(gr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "resources.CreateOrUpdateFuture", "Result", gr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -398,30 +364,7 @@ func (client Client) CreateOrUpdateByIDSender(req *http.Request) (future CreateO var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client Client) (gr GenericResource, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "resources.CreateOrUpdateByIDFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("resources.CreateOrUpdateByIDFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - gr.Response.Response, err = future.GetResult(sender) - if gr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "resources.CreateOrUpdateByIDFuture", "Result", nil, "received nil response and error") - } - if err == nil && gr.Response.Response.StatusCode != http.StatusNoContent { - gr, err = client.CreateOrUpdateByIDResponder(gr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "resources.CreateOrUpdateByIDFuture", "Result", gr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -514,20 +457,7 @@ func (client Client) DeleteSender(req *http.Request) (future DeleteFuture, err e var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client Client) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "resources.DeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("resources.DeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -603,20 +533,7 @@ func (client Client) DeleteByIDSender(req *http.Request) (future DeleteByIDFutur var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) 
future.FutureAPI = &azf - future.Result = func(client Client) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "resources.DeleteByIDFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("resources.DeleteByIDFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1154,20 +1071,7 @@ func (client Client) MoveResourcesSender(req *http.Request) (future MoveResource var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client Client) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "resources.MoveResourcesFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("resources.MoveResourcesFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1261,30 +1165,7 @@ func (client Client) UpdateSender(req *http.Request) (future UpdateFuture, err e var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client Client) (gr GenericResource, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "resources.UpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("resources.UpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - gr.Response.Response, err = future.GetResult(sender) - if gr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "resources.UpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && gr.Response.Response.StatusCode != http.StatusNoContent { - gr, err = client.UpdateResponder(gr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "resources.UpdateFuture", "Result", gr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1364,30 +1245,7 @@ func (client Client) UpdateByIDSender(req *http.Request) (future UpdateByIDFutur var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client Client) (gr GenericResource, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "resources.UpdateByIDFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("resources.UpdateByIDFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - gr.Response.Response, err = future.GetResult(sender) - if gr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "resources.UpdateByIDFuture", "Result", nil, "received nil response and error") - } - if err == nil && gr.Response.Response.StatusCode != 
http.StatusNoContent { - gr, err = client.UpdateByIDResponder(gr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "resources.UpdateByIDFuture", "Result", gr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1478,20 +1336,7 @@ func (client Client) ValidateMoveResourcesSender(req *http.Request) (future Vali var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client Client) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "resources.ValidateMoveResourcesFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("resources.ValidateMoveResourcesFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/tags.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/tags.go index b7b05814c73..10f62a9b418 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/tags.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/tags.go @@ -1,18 +1,7 @@ package resources -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/version.go index 6c1215c665d..d664d893be8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/version.go @@ -2,19 +2,8 @@ package resources import "github.com/Azure/azure-sdk-for-go/version" -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/CHANGELOG.md index 8e25820466d..51a57adb9a5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/CHANGELOG.md @@ -1,5 +1,5 @@ Generated from https://github.com/Azure/azure-rest-api-specs/tree/3c764635e7d442b3e74caf593029fcd440b3ef82//specification/resources/resource-manager/readme.md tag: `package-subscriptions-2019-06` -Code generator @microsoft.azure/autorest.go@2.1.175 +Code generator @microsoft.azure/autorest.go@2.1.178 diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/_meta.json new file mode 100644 index 00000000000..9ad0ac1fab5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/_meta.json @@ -0,0 +1,10 @@ +{ + "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", + "readme": "/_/azure-rest-api-specs/specification/resources/resource-manager/readme.md", + "tag": "package-subscriptions-2019-06", + "use": "@microsoft.azure/autorest.go@2.1.178", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.178 --go --verbose --go-sdk-folder=/_/azure-sdk-for-go --tag=package-subscriptions-2019-06 --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/resources/resource-manager/readme.md", + "additional_properties": { + "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION" + } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/client.go index 4fc1de4ff82..0c179da7147 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/client.go @@ -5,19 +5,8 @@ // organization. package subscriptions -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/enums.go index 9e69877e248..20a93dde71a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/enums.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/enums.go @@ -1,18 +1,7 @@ package subscriptions -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/models.go index c753678d150..88cc65ce392 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/models.go @@ -1,18 +1,7 @@ package subscriptions -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/operations.go index ed6343466ae..34336685b67 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/operations.go @@ -1,18 +1,7 @@ package subscriptions -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/subscriptions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/subscriptions.go index 09a07c61ef2..2b565826c5a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/subscriptions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/subscriptions.go @@ -1,18 +1,7 @@ package subscriptions -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/tenants.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/tenants.go index 2c44a424506..86998671a67 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/tenants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/tenants.go @@ -1,18 +1,7 @@ package subscriptions -// Copyright (c) Microsoft and contributors. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/version.go index 45ee814e6da..96490455e68 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions/version.go @@ -2,19 +2,8 @@ package subscriptions import "github.com/Azure/azure-sdk-for-go/version" -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/CHANGELOG.md index ae27a78f2a1..f897eda520d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/CHANGELOG.md @@ -1,5 +1,28 @@ Generated from https://github.com/Azure/azure-rest-api-specs/tree/3c764635e7d442b3e74caf593029fcd440b3ef82//specification/resources/resource-manager/readme.md tag: `package-resources-2019-07` -Code generator @microsoft.azure/autorest.go@2.1.175 +Code generator @microsoft.azure/autorest.go@2.1.178 +### New Funcs + +1. *DeploymentsCreateOrUpdateAtManagementGroupScopeFuture.UnmarshalJSON([]byte) error +1. *DeploymentsCreateOrUpdateAtScopeFuture.UnmarshalJSON([]byte) error +1. *DeploymentsCreateOrUpdateAtSubscriptionScopeFuture.UnmarshalJSON([]byte) error +1. *DeploymentsCreateOrUpdateAtTenantScopeFuture.UnmarshalJSON([]byte) error +1. *DeploymentsCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. 
*DeploymentsDeleteAtManagementGroupScopeFuture.UnmarshalJSON([]byte) error +1. *DeploymentsDeleteAtScopeFuture.UnmarshalJSON([]byte) error +1. *DeploymentsDeleteAtSubscriptionScopeFuture.UnmarshalJSON([]byte) error +1. *DeploymentsDeleteAtTenantScopeFuture.UnmarshalJSON([]byte) error +1. *DeploymentsDeleteFuture.UnmarshalJSON([]byte) error +1. *DeploymentsWhatIfAtSubscriptionScopeFuture.UnmarshalJSON([]byte) error +1. *DeploymentsWhatIfFuture.UnmarshalJSON([]byte) error +1. *ResourceGroupsDeleteFuture.UnmarshalJSON([]byte) error +1. *ResourcesCreateOrUpdateByIDFuture.UnmarshalJSON([]byte) error +1. *ResourcesCreateOrUpdateFuture.UnmarshalJSON([]byte) error +1. *ResourcesDeleteByIDFuture.UnmarshalJSON([]byte) error +1. *ResourcesDeleteFuture.UnmarshalJSON([]byte) error +1. *ResourcesMoveResourcesFuture.UnmarshalJSON([]byte) error +1. *ResourcesUpdateByIDFuture.UnmarshalJSON([]byte) error +1. *ResourcesUpdateFuture.UnmarshalJSON([]byte) error +1. *ResourcesValidateMoveResourcesFuture.UnmarshalJSON([]byte) error diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/_meta.json new file mode 100644 index 00000000000..dc43afebbf6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/_meta.json @@ -0,0 +1,10 @@ +{ + "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", + "readme": "/_/azure-rest-api-specs/specification/resources/resource-manager/readme.md", + "tag": "package-resources-2019-07", + "use": "@microsoft.azure/autorest.go@2.1.178", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.178 --go --verbose --go-sdk-folder=/_/azure-sdk-for-go --tag=package-resources-2019-07 --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/resources/resource-manager/readme.md", + "additional_properties": { + "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION" + } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/client.go index 8274059781a..a3b36de77bd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/client.go @@ -3,19 +3,8 @@ // Provides operations for working with resources and resource groups. package features -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. 
// Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/deploymentoperations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/deploymentoperations.go index ae044473bd7..128801333c7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/deploymentoperations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/deploymentoperations.go @@ -1,18 +1,7 @@ package features -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/deployments.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/deployments.go index a7e8a8ae7a5..c273f8309e5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/deployments.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/deployments.go @@ -1,18 +1,7 @@ package features -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -1039,30 +1028,7 @@ func (client DeploymentsClient) CreateOrUpdateSender(req *http.Request) (future var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DeploymentsClient) (de DeploymentExtended, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "features.DeploymentsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("features.DeploymentsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - de.Response.Response, err = future.GetResult(sender) - if de.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "features.DeploymentsCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && de.Response.Response.StatusCode != http.StatusNoContent { - de, err = client.CreateOrUpdateResponder(de.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "features.DeploymentsCreateOrUpdateFuture", "Result", de.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1161,30 +1127,7 @@ func (client DeploymentsClient) CreateOrUpdateAtManagementGroupScopeSender(req * var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DeploymentsClient) (de DeploymentExtended, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "features.DeploymentsCreateOrUpdateAtManagementGroupScopeFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("features.DeploymentsCreateOrUpdateAtManagementGroupScopeFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - de.Response.Response, err = future.GetResult(sender) - if de.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "features.DeploymentsCreateOrUpdateAtManagementGroupScopeFuture", "Result", nil, "received nil response and error") - } - if err == nil && de.Response.Response.StatusCode != http.StatusNoContent { - de, err = client.CreateOrUpdateAtManagementGroupScopeResponder(de.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "features.DeploymentsCreateOrUpdateAtManagementGroupScopeFuture", "Result", de.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1279,30 +1222,7 @@ func (client DeploymentsClient) CreateOrUpdateAtScopeSender(req *http.Request) ( var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DeploymentsClient) (de DeploymentExtended, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "features.DeploymentsCreateOrUpdateAtScopeFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("features.DeploymentsCreateOrUpdateAtScopeFuture") - 
return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - de.Response.Response, err = future.GetResult(sender) - if de.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "features.DeploymentsCreateOrUpdateAtScopeFuture", "Result", nil, "received nil response and error") - } - if err == nil && de.Response.Response.StatusCode != http.StatusNoContent { - de, err = client.CreateOrUpdateAtScopeResponder(de.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "features.DeploymentsCreateOrUpdateAtScopeFuture", "Result", de.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1397,30 +1317,7 @@ func (client DeploymentsClient) CreateOrUpdateAtSubscriptionScopeSender(req *htt var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DeploymentsClient) (de DeploymentExtended, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "features.DeploymentsCreateOrUpdateAtSubscriptionScopeFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("features.DeploymentsCreateOrUpdateAtSubscriptionScopeFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - de.Response.Response, err = future.GetResult(sender) - if de.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "features.DeploymentsCreateOrUpdateAtSubscriptionScopeFuture", "Result", nil, "received nil response and error") - } - if err == nil && de.Response.Response.StatusCode != http.StatusNoContent { - de, err = client.CreateOrUpdateAtSubscriptionScopeResponder(de.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "features.DeploymentsCreateOrUpdateAtSubscriptionScopeFuture", "Result", de.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1514,30 +1411,7 @@ func (client DeploymentsClient) CreateOrUpdateAtTenantScopeSender(req *http.Requ var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DeploymentsClient) (de DeploymentExtended, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "features.DeploymentsCreateOrUpdateAtTenantScopeFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("features.DeploymentsCreateOrUpdateAtTenantScopeFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - de.Response.Response, err = future.GetResult(sender) - if de.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "features.DeploymentsCreateOrUpdateAtTenantScopeFuture", "Result", nil, "received nil response and error") - } - if err == nil && de.Response.Response.StatusCode != http.StatusNoContent { - de, err = client.CreateOrUpdateAtTenantScopeResponder(de.Response.Response) - if err != nil { - err = 
autorest.NewErrorWithError(err, "features.DeploymentsCreateOrUpdateAtTenantScopeFuture", "Result", de.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1634,20 +1508,7 @@ func (client DeploymentsClient) DeleteSender(req *http.Request) (future Deployme var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DeploymentsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "features.DeploymentsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("features.DeploymentsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1739,20 +1600,7 @@ func (client DeploymentsClient) DeleteAtManagementGroupScopeSender(req *http.Req var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DeploymentsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "features.DeploymentsDeleteAtManagementGroupScopeFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("features.DeploymentsDeleteAtManagementGroupScopeFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1841,20 +1689,7 @@ func (client DeploymentsClient) DeleteAtScopeSender(req *http.Request) (future D var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DeploymentsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "features.DeploymentsDeleteAtScopeFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("features.DeploymentsDeleteAtScopeFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1942,20 +1777,7 @@ func (client DeploymentsClient) DeleteAtSubscriptionScopeSender(req *http.Reques var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DeploymentsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "features.DeploymentsDeleteAtSubscriptionScopeFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("features.DeploymentsDeleteAtSubscriptionScopeFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -2042,20 +1864,7 @@ func (client DeploymentsClient) DeleteAtTenantScopeSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DeploymentsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = 
autorest.NewErrorWithError(err, "features.DeploymentsDeleteAtTenantScopeFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("features.DeploymentsDeleteAtTenantScopeFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -4097,30 +3906,7 @@ func (client DeploymentsClient) WhatIfSender(req *http.Request) (future Deployme var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DeploymentsClient) (wior WhatIfOperationResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "features.DeploymentsWhatIfFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("features.DeploymentsWhatIfFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - wior.Response.Response, err = future.GetResult(sender) - if wior.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "features.DeploymentsWhatIfFuture", "Result", nil, "received nil response and error") - } - if err == nil && wior.Response.Response.StatusCode != http.StatusNoContent { - wior, err = client.WhatIfResponder(wior.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "features.DeploymentsWhatIfFuture", "Result", wior.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -4210,30 +3996,7 @@ func (client DeploymentsClient) WhatIfAtSubscriptionScopeSender(req *http.Reques var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client DeploymentsClient) (wior WhatIfOperationResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "features.DeploymentsWhatIfAtSubscriptionScopeFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("features.DeploymentsWhatIfAtSubscriptionScopeFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - wior.Response.Response, err = future.GetResult(sender) - if wior.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "features.DeploymentsWhatIfAtSubscriptionScopeFuture", "Result", nil, "received nil response and error") - } - if err == nil && wior.Response.Response.StatusCode != http.StatusNoContent { - wior, err = client.WhatIfAtSubscriptionScopeResponder(wior.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "features.DeploymentsWhatIfAtSubscriptionScopeFuture", "Result", wior.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/enums.go index c78f755f02e..f79ae04b985 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/enums.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/enums.go @@ -1,18 +1,7 @@ package features -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/models.go index 6ca44f56096..291e0077144 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/models.go @@ -1,18 +1,7 @@ package features -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -602,6 +591,39 @@ type DeploymentsCreateOrUpdateAtManagementGroupScopeFuture struct { Result func(DeploymentsClient) (DeploymentExtended, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DeploymentsCreateOrUpdateAtManagementGroupScopeFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DeploymentsCreateOrUpdateAtManagementGroupScopeFuture.Result. 
+func (future *DeploymentsCreateOrUpdateAtManagementGroupScopeFuture) result(client DeploymentsClient) (de DeploymentExtended, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "features.DeploymentsCreateOrUpdateAtManagementGroupScopeFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("features.DeploymentsCreateOrUpdateAtManagementGroupScopeFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if de.Response.Response, err = future.GetResult(sender); err == nil && de.Response.Response.StatusCode != http.StatusNoContent { + de, err = client.CreateOrUpdateAtManagementGroupScopeResponder(de.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "features.DeploymentsCreateOrUpdateAtManagementGroupScopeFuture", "Result", de.Response.Response, "Failure responding to request") + } + } + return +} + // DeploymentsCreateOrUpdateAtScopeFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type DeploymentsCreateOrUpdateAtScopeFuture struct { @@ -611,6 +633,39 @@ type DeploymentsCreateOrUpdateAtScopeFuture struct { Result func(DeploymentsClient) (DeploymentExtended, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DeploymentsCreateOrUpdateAtScopeFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DeploymentsCreateOrUpdateAtScopeFuture.Result. +func (future *DeploymentsCreateOrUpdateAtScopeFuture) result(client DeploymentsClient) (de DeploymentExtended, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "features.DeploymentsCreateOrUpdateAtScopeFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("features.DeploymentsCreateOrUpdateAtScopeFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if de.Response.Response, err = future.GetResult(sender); err == nil && de.Response.Response.StatusCode != http.StatusNoContent { + de, err = client.CreateOrUpdateAtScopeResponder(de.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "features.DeploymentsCreateOrUpdateAtScopeFuture", "Result", de.Response.Response, "Failure responding to request") + } + } + return +} + // DeploymentsCreateOrUpdateAtSubscriptionScopeFuture an abstraction for monitoring and retrieving the // results of a long-running operation. type DeploymentsCreateOrUpdateAtSubscriptionScopeFuture struct { @@ -620,6 +675,39 @@ type DeploymentsCreateOrUpdateAtSubscriptionScopeFuture struct { Result func(DeploymentsClient) (DeploymentExtended, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *DeploymentsCreateOrUpdateAtSubscriptionScopeFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DeploymentsCreateOrUpdateAtSubscriptionScopeFuture.Result. +func (future *DeploymentsCreateOrUpdateAtSubscriptionScopeFuture) result(client DeploymentsClient) (de DeploymentExtended, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "features.DeploymentsCreateOrUpdateAtSubscriptionScopeFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("features.DeploymentsCreateOrUpdateAtSubscriptionScopeFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if de.Response.Response, err = future.GetResult(sender); err == nil && de.Response.Response.StatusCode != http.StatusNoContent { + de, err = client.CreateOrUpdateAtSubscriptionScopeResponder(de.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "features.DeploymentsCreateOrUpdateAtSubscriptionScopeFuture", "Result", de.Response.Response, "Failure responding to request") + } + } + return +} + // DeploymentsCreateOrUpdateAtTenantScopeFuture an abstraction for monitoring and retrieving the results of // a long-running operation. type DeploymentsCreateOrUpdateAtTenantScopeFuture struct { @@ -629,6 +717,39 @@ type DeploymentsCreateOrUpdateAtTenantScopeFuture struct { Result func(DeploymentsClient) (DeploymentExtended, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DeploymentsCreateOrUpdateAtTenantScopeFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DeploymentsCreateOrUpdateAtTenantScopeFuture.Result. +func (future *DeploymentsCreateOrUpdateAtTenantScopeFuture) result(client DeploymentsClient) (de DeploymentExtended, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "features.DeploymentsCreateOrUpdateAtTenantScopeFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("features.DeploymentsCreateOrUpdateAtTenantScopeFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if de.Response.Response, err = future.GetResult(sender); err == nil && de.Response.Response.StatusCode != http.StatusNoContent { + de, err = client.CreateOrUpdateAtTenantScopeResponder(de.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "features.DeploymentsCreateOrUpdateAtTenantScopeFuture", "Result", de.Response.Response, "Failure responding to request") + } + } + return +} + // DeploymentsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a // long-running operation. 
type DeploymentsCreateOrUpdateFuture struct { @@ -638,6 +759,39 @@ type DeploymentsCreateOrUpdateFuture struct { Result func(DeploymentsClient) (DeploymentExtended, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DeploymentsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DeploymentsCreateOrUpdateFuture.Result. +func (future *DeploymentsCreateOrUpdateFuture) result(client DeploymentsClient) (de DeploymentExtended, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "features.DeploymentsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("features.DeploymentsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if de.Response.Response, err = future.GetResult(sender); err == nil && de.Response.Response.StatusCode != http.StatusNoContent { + de, err = client.CreateOrUpdateResponder(de.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "features.DeploymentsCreateOrUpdateFuture", "Result", de.Response.Response, "Failure responding to request") + } + } + return +} + // DeploymentsDeleteAtManagementGroupScopeFuture an abstraction for monitoring and retrieving the results // of a long-running operation. type DeploymentsDeleteAtManagementGroupScopeFuture struct { @@ -647,6 +801,33 @@ type DeploymentsDeleteAtManagementGroupScopeFuture struct { Result func(DeploymentsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DeploymentsDeleteAtManagementGroupScopeFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DeploymentsDeleteAtManagementGroupScopeFuture.Result. +func (future *DeploymentsDeleteAtManagementGroupScopeFuture) result(client DeploymentsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "features.DeploymentsDeleteAtManagementGroupScopeFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("features.DeploymentsDeleteAtManagementGroupScopeFuture") + return + } + ar.Response = future.Response() + return +} + // DeploymentsDeleteAtScopeFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type DeploymentsDeleteAtScopeFuture struct { @@ -656,6 +837,33 @@ type DeploymentsDeleteAtScopeFuture struct { Result func(DeploymentsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *DeploymentsDeleteAtScopeFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DeploymentsDeleteAtScopeFuture.Result. +func (future *DeploymentsDeleteAtScopeFuture) result(client DeploymentsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "features.DeploymentsDeleteAtScopeFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("features.DeploymentsDeleteAtScopeFuture") + return + } + ar.Response = future.Response() + return +} + // DeploymentsDeleteAtSubscriptionScopeFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type DeploymentsDeleteAtSubscriptionScopeFuture struct { @@ -665,6 +873,33 @@ type DeploymentsDeleteAtSubscriptionScopeFuture struct { Result func(DeploymentsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DeploymentsDeleteAtSubscriptionScopeFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DeploymentsDeleteAtSubscriptionScopeFuture.Result. +func (future *DeploymentsDeleteAtSubscriptionScopeFuture) result(client DeploymentsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "features.DeploymentsDeleteAtSubscriptionScopeFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("features.DeploymentsDeleteAtSubscriptionScopeFuture") + return + } + ar.Response = future.Response() + return +} + // DeploymentsDeleteAtTenantScopeFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type DeploymentsDeleteAtTenantScopeFuture struct { @@ -674,6 +909,33 @@ type DeploymentsDeleteAtTenantScopeFuture struct { Result func(DeploymentsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DeploymentsDeleteAtTenantScopeFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DeploymentsDeleteAtTenantScopeFuture.Result. 
+func (future *DeploymentsDeleteAtTenantScopeFuture) result(client DeploymentsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "features.DeploymentsDeleteAtTenantScopeFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("features.DeploymentsDeleteAtTenantScopeFuture") + return + } + ar.Response = future.Response() + return +} + // DeploymentsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type DeploymentsDeleteFuture struct { @@ -683,6 +945,33 @@ type DeploymentsDeleteFuture struct { Result func(DeploymentsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DeploymentsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DeploymentsDeleteFuture.Result. +func (future *DeploymentsDeleteFuture) result(client DeploymentsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "features.DeploymentsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("features.DeploymentsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // DeploymentsWhatIfAtSubscriptionScopeFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type DeploymentsWhatIfAtSubscriptionScopeFuture struct { @@ -692,6 +981,39 @@ type DeploymentsWhatIfAtSubscriptionScopeFuture struct { Result func(DeploymentsClient) (WhatIfOperationResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DeploymentsWhatIfAtSubscriptionScopeFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DeploymentsWhatIfAtSubscriptionScopeFuture.Result. 
+func (future *DeploymentsWhatIfAtSubscriptionScopeFuture) result(client DeploymentsClient) (wior WhatIfOperationResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "features.DeploymentsWhatIfAtSubscriptionScopeFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("features.DeploymentsWhatIfAtSubscriptionScopeFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if wior.Response.Response, err = future.GetResult(sender); err == nil && wior.Response.Response.StatusCode != http.StatusNoContent { + wior, err = client.WhatIfAtSubscriptionScopeResponder(wior.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "features.DeploymentsWhatIfAtSubscriptionScopeFuture", "Result", wior.Response.Response, "Failure responding to request") + } + } + return +} + // DeploymentsWhatIfFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type DeploymentsWhatIfFuture struct { @@ -701,6 +1023,39 @@ type DeploymentsWhatIfFuture struct { Result func(DeploymentsClient) (WhatIfOperationResult, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DeploymentsWhatIfFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DeploymentsWhatIfFuture.Result. +func (future *DeploymentsWhatIfFuture) result(client DeploymentsClient) (wior WhatIfOperationResult, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "features.DeploymentsWhatIfFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("features.DeploymentsWhatIfFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if wior.Response.Response, err = future.GetResult(sender); err == nil && wior.Response.Response.StatusCode != http.StatusNoContent { + wior, err = client.WhatIfResponder(wior.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "features.DeploymentsWhatIfFuture", "Result", wior.Response.Response, "Failure responding to request") + } + } + return +} + // DeploymentValidateResult information from validate template deployment response. type DeploymentValidateResult struct { autorest.Response `json:"-"` @@ -1699,6 +2054,33 @@ type ResourceGroupsDeleteFuture struct { Result func(ResourceGroupsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ResourceGroupsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ResourceGroupsDeleteFuture.Result. 
+func (future *ResourceGroupsDeleteFuture) result(client ResourceGroupsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "features.ResourceGroupsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("features.ResourceGroupsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ResourceListResult list of resource groups. type ResourceListResult struct { autorest.Response `json:"-"` @@ -1890,6 +2272,39 @@ type ResourcesCreateOrUpdateByIDFuture struct { Result func(ResourcesClient) (GenericResource, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ResourcesCreateOrUpdateByIDFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ResourcesCreateOrUpdateByIDFuture.Result. +func (future *ResourcesCreateOrUpdateByIDFuture) result(client ResourcesClient) (gr GenericResource, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "features.ResourcesCreateOrUpdateByIDFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("features.ResourcesCreateOrUpdateByIDFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gr.Response.Response, err = future.GetResult(sender); err == nil && gr.Response.Response.StatusCode != http.StatusNoContent { + gr, err = client.CreateOrUpdateByIDResponder(gr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "features.ResourcesCreateOrUpdateByIDFuture", "Result", gr.Response.Response, "Failure responding to request") + } + } + return +} + // ResourcesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type ResourcesCreateOrUpdateFuture struct { @@ -1899,6 +2314,39 @@ type ResourcesCreateOrUpdateFuture struct { Result func(ResourcesClient) (GenericResource, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ResourcesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ResourcesCreateOrUpdateFuture.Result. 
+func (future *ResourcesCreateOrUpdateFuture) result(client ResourcesClient) (gr GenericResource, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "features.ResourcesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("features.ResourcesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gr.Response.Response, err = future.GetResult(sender); err == nil && gr.Response.Response.StatusCode != http.StatusNoContent { + gr, err = client.CreateOrUpdateResponder(gr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "features.ResourcesCreateOrUpdateFuture", "Result", gr.Response.Response, "Failure responding to request") + } + } + return +} + // ResourcesDeleteByIDFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type ResourcesDeleteByIDFuture struct { @@ -1908,6 +2356,33 @@ type ResourcesDeleteByIDFuture struct { Result func(ResourcesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ResourcesDeleteByIDFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ResourcesDeleteByIDFuture.Result. +func (future *ResourcesDeleteByIDFuture) result(client ResourcesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "features.ResourcesDeleteByIDFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("features.ResourcesDeleteByIDFuture") + return + } + ar.Response = future.Response() + return +} + // ResourcesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type ResourcesDeleteFuture struct { @@ -1917,6 +2392,33 @@ type ResourcesDeleteFuture struct { Result func(ResourcesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ResourcesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ResourcesDeleteFuture.Result. +func (future *ResourcesDeleteFuture) result(client ResourcesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "features.ResourcesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("features.ResourcesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + // ResourcesMoveInfo parameters of move resources. type ResourcesMoveInfo struct { // Resources - The IDs of the resources. 
@@ -1934,6 +2436,33 @@ type ResourcesMoveResourcesFuture struct { Result func(ResourcesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ResourcesMoveResourcesFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ResourcesMoveResourcesFuture.Result. +func (future *ResourcesMoveResourcesFuture) result(client ResourcesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "features.ResourcesMoveResourcesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("features.ResourcesMoveResourcesFuture") + return + } + ar.Response = future.Response() + return +} + // ResourcesUpdateByIDFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type ResourcesUpdateByIDFuture struct { @@ -1943,6 +2472,39 @@ type ResourcesUpdateByIDFuture struct { Result func(ResourcesClient) (GenericResource, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ResourcesUpdateByIDFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ResourcesUpdateByIDFuture.Result. +func (future *ResourcesUpdateByIDFuture) result(client ResourcesClient) (gr GenericResource, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "features.ResourcesUpdateByIDFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("features.ResourcesUpdateByIDFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gr.Response.Response, err = future.GetResult(sender); err == nil && gr.Response.Response.StatusCode != http.StatusNoContent { + gr, err = client.UpdateByIDResponder(gr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "features.ResourcesUpdateByIDFuture", "Result", gr.Response.Response, "Failure responding to request") + } + } + return +} + // ResourcesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type ResourcesUpdateFuture struct { @@ -1952,6 +2514,39 @@ type ResourcesUpdateFuture struct { Result func(ResourcesClient) (GenericResource, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ResourcesUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ResourcesUpdateFuture.Result. 
+func (future *ResourcesUpdateFuture) result(client ResourcesClient) (gr GenericResource, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "features.ResourcesUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("features.ResourcesUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gr.Response.Response, err = future.GetResult(sender); err == nil && gr.Response.Response.StatusCode != http.StatusNoContent { + gr, err = client.UpdateResponder(gr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "features.ResourcesUpdateFuture", "Result", gr.Response.Response, "Failure responding to request") + } + } + return +} + // ResourcesValidateMoveResourcesFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ResourcesValidateMoveResourcesFuture struct { @@ -1961,6 +2556,33 @@ type ResourcesValidateMoveResourcesFuture struct { Result func(ResourcesClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ResourcesValidateMoveResourcesFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ResourcesValidateMoveResourcesFuture.Result. +func (future *ResourcesValidateMoveResourcesFuture) result(client ResourcesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "features.ResourcesValidateMoveResourcesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("features.ResourcesValidateMoveResourcesFuture") + return + } + ar.Response = future.Response() + return +} + // Sku SKU for the resource. type Sku struct { // Name - The SKU name. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/operations.go index cbc64f1bb29..82be51f7274 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/operations.go @@ -1,18 +1,7 @@ package features -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/providers.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/providers.go index 1be433aeeb3..8b941330156 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/providers.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/providers.go @@ -1,18 +1,7 @@ package features -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/resourcegroups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/resourcegroups.go index 266d24197fc..9d9b3f6881d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/resourcegroups.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/resourcegroups.go @@ -1,18 +1,7 @@ package features -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -284,20 +273,7 @@ func (client ResourceGroupsClient) DeleteSender(req *http.Request) (future Resou var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ResourceGroupsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "features.ResourceGroupsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("features.ResourceGroupsDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/resources.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/resources.go index 1979e5f98d7..83bc7523b66 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/resources.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/resources.go @@ -1,18 +1,7 @@ package features -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
@@ -288,30 +277,7 @@ func (client ResourcesClient) CreateOrUpdateSender(req *http.Request) (future Re var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ResourcesClient) (gr GenericResource, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "features.ResourcesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("features.ResourcesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - gr.Response.Response, err = future.GetResult(sender) - if gr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "features.ResourcesCreateOrUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && gr.Response.Response.StatusCode != http.StatusNoContent { - gr, err = client.CreateOrUpdateResponder(gr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "features.ResourcesCreateOrUpdateFuture", "Result", gr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -398,30 +364,7 @@ func (client ResourcesClient) CreateOrUpdateByIDSender(req *http.Request) (futur var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ResourcesClient) (gr GenericResource, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "features.ResourcesCreateOrUpdateByIDFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("features.ResourcesCreateOrUpdateByIDFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - gr.Response.Response, err = future.GetResult(sender) - if gr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "features.ResourcesCreateOrUpdateByIDFuture", "Result", nil, "received nil response and error") - } - if err == nil && gr.Response.Response.StatusCode != http.StatusNoContent { - gr, err = client.CreateOrUpdateByIDResponder(gr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "features.ResourcesCreateOrUpdateByIDFuture", "Result", gr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -514,20 +457,7 @@ func (client ResourcesClient) DeleteSender(req *http.Request) (future ResourcesD var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ResourcesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "features.ResourcesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("features.ResourcesDeleteFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -603,20 +533,7 @@ func (client ResourcesClient) 
DeleteByIDSender(req *http.Request) (future Resour var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ResourcesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "features.ResourcesDeleteByIDFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("features.ResourcesDeleteByIDFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1154,20 +1071,7 @@ func (client ResourcesClient) MoveResourcesSender(req *http.Request) (future Res var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ResourcesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "features.ResourcesMoveResourcesFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("features.ResourcesMoveResourcesFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } @@ -1261,30 +1165,7 @@ func (client ResourcesClient) UpdateSender(req *http.Request) (future ResourcesU var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ResourcesClient) (gr GenericResource, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "features.ResourcesUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("features.ResourcesUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - gr.Response.Response, err = future.GetResult(sender) - if gr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "features.ResourcesUpdateFuture", "Result", nil, "received nil response and error") - } - if err == nil && gr.Response.Response.StatusCode != http.StatusNoContent { - gr, err = client.UpdateResponder(gr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "features.ResourcesUpdateFuture", "Result", gr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1364,30 +1245,7 @@ func (client ResourcesClient) UpdateByIDSender(req *http.Request) (future Resour var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ResourcesClient) (gr GenericResource, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "features.ResourcesUpdateByIDFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("features.ResourcesUpdateByIDFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - gr.Response.Response, err = future.GetResult(sender) - if 
gr.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "features.ResourcesUpdateByIDFuture", "Result", nil, "received nil response and error") - } - if err == nil && gr.Response.Response.StatusCode != http.StatusNoContent { - gr, err = client.UpdateByIDResponder(gr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "features.ResourcesUpdateByIDFuture", "Result", gr.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -1478,20 +1336,7 @@ func (client ResourcesClient) ValidateMoveResourcesSender(req *http.Request) (fu var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client ResourcesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "features.ResourcesValidateMoveResourcesFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("features.ResourcesValidateMoveResourcesFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/tags.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/tags.go index a63748e7fa9..d951d93555e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/tags.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/tags.go @@ -1,18 +1,7 @@ package features -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/version.go index 5e6c5a997bd..9ecc156fda6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/features/version.go @@ -2,19 +2,8 @@ package features import "github.com/Azure/azure-sdk-for-go/version" -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/CHANGELOG.md index f0995918739..3071516d54e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/CHANGELOG.md @@ -1,5 +1,9 @@ Generated from https://github.com/Azure/azure-rest-api-specs/tree/3c764635e7d442b3e74caf593029fcd440b3ef82//specification/storage/resource-manager/readme.md tag: `package-2019-04` -Code generator @microsoft.azure/autorest.go@2.1.175 +Code generator @microsoft.azure/autorest.go@2.1.178 +### New Funcs + +1. *AccountsCreateFuture.UnmarshalJSON([]byte) error +1. *AccountsFailoverFuture.UnmarshalJSON([]byte) error diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/_meta.json new file mode 100644 index 00000000000..f633d8c54d3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/_meta.json @@ -0,0 +1,10 @@ +{ + "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", + "readme": "/_/azure-rest-api-specs/specification/storage/resource-manager/readme.md", + "tag": "package-2019-04", + "use": "@microsoft.azure/autorest.go@2.1.178", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.178 --go --verbose --go-sdk-folder=/_/azure-sdk-for-go --tag=package-2019-04 --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/storage/resource-manager/readme.md", + "additional_properties": { + "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION" + } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/accounts.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/accounts.go index f15bd3bc09d..39930b40179 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/accounts.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/accounts.go @@ -1,18 +1,7 @@ package storage -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -229,30 +218,7 @@ func (client AccountsClient) CreateSender(req *http.Request) (future AccountsCre var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client AccountsClient) (a Account, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsCreateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("storage.AccountsCreateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - a.Response.Response, err = future.GetResult(sender) - if a.Response.Response == nil && err == nil { - err = autorest.NewErrorWithError(err, "storage.AccountsCreateFuture", "Result", nil, "received nil response and error") - } - if err == nil && a.Response.Response.StatusCode != http.StatusNoContent { - a, err = client.CreateResponder(a.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsCreateFuture", "Result", a.Response.Response, "Failure responding to request") - } - } - return - } + future.Result = future.result return } @@ -437,20 +403,7 @@ func (client AccountsClient) FailoverSender(req *http.Request) (future AccountsF var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf - future.Result = func(client AccountsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsFailoverFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("storage.AccountsFailoverFuture") - return - } - ar.Response = future.Response() - return - } + future.Result = future.result return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/blobcontainers.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/blobcontainers.go index f22cd166b8f..e697812f353 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/blobcontainers.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/blobcontainers.go @@ -1,18 +1,7 @@ package storage -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/blobservices.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/blobservices.go index abb849d4b39..76d5f2eb80d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/blobservices.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/blobservices.go @@ -1,18 +1,7 @@ package storage -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/client.go index 3cf659354e9..6178598d2f4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/client.go @@ -3,19 +3,8 @@ // The Azure Storage Management API. package storage -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. 
// Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/enums.go index 134efee6ffc..73dee669778 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/enums.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/enums.go @@ -1,18 +1,7 @@ package storage -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/fileservices.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/fileservices.go index 5fa4da43f1c..9e01721a085 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/fileservices.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/fileservices.go @@ -1,18 +1,7 @@ package storage -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/fileshares.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/fileshares.go index 817eb1a17e3..63def4e16c2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/fileshares.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/fileshares.go @@ -1,18 +1,7 @@ package storage -// Copyright (c) Microsoft and contributors. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/managementpolicies.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/managementpolicies.go index 45b8f319b94..72854deb9b8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/managementpolicies.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/managementpolicies.go @@ -1,18 +1,7 @@ package storage -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/models.go index a107802966f..b527cc55556 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/models.go @@ -1,18 +1,7 @@ package storage -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. @@ -613,6 +602,39 @@ type AccountsCreateFuture struct { Result func(AccountsClient) (Account, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *AccountsCreateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for AccountsCreateFuture.Result. +func (future *AccountsCreateFuture) result(client AccountsClient) (a Account, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsCreateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("storage.AccountsCreateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if a.Response.Response, err = future.GetResult(sender); err == nil && a.Response.Response.StatusCode != http.StatusNoContent { + a, err = client.CreateResponder(a.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsCreateFuture", "Result", a.Response.Response, "Failure responding to request") + } + } + return +} + // AccountsFailoverFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type AccountsFailoverFuture struct { @@ -622,6 +644,33 @@ type AccountsFailoverFuture struct { Result func(AccountsClient) (autorest.Response, error) } +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *AccountsFailoverFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for AccountsFailoverFuture.Result. +func (future *AccountsFailoverFuture) result(client AccountsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsFailoverFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("storage.AccountsFailoverFuture") + return + } + ar.Response = future.Response() + return +} + // AccountUpdateParameters the parameters that can be provided when updating the storage account // properties. type AccountUpdateParameters struct { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/operations.go index 147b715d4ab..2ac9faeb71c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/operations.go @@ -1,18 +1,7 @@ package storage -// Copyright (c) Microsoft and contributors. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/skus.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/skus.go index 1fff42f0bc0..1f3350c01c8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/skus.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/skus.go @@ -1,18 +1,7 @@ package storage -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/usages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/usages.go index d63f7b38758..8c0fc598ba7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/usages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/usages.go @@ -1,18 +1,7 @@ package storage -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
// // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/version.go index b22e8dfbddf..82ca34f9f54 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/version.go @@ -2,19 +2,8 @@ package storage import "github.com/Azure/azure-sdk-for-go/version" -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go index 2f37bede66e..306dd1b711a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go @@ -1,18 +1,7 @@ package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. import ( "bytes" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go index 76794c30518..01741524afd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go @@ -1,19 +1,8 @@ // Package storage provides clients for Microsoft Azure Storage Services. package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. import ( "bytes" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go index 0b02e52bd7a..462e3dcf2fb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go @@ -1,18 +1,7 @@ package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. import ( "encoding/xml" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go index 62e461a5592..89ab054ec2c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go @@ -1,18 +1,7 @@ package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. import ( "errors" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go index 02fa5929cc1..0a985b22b0c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go @@ -1,18 +1,7 @@ package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. import ( "encoding/xml" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go index bd19eccc41e..9d445decfd1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go @@ -1,18 +1,7 @@ package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. import ( "bytes" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go index f84c7d5e94a..0f4d6279e62 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go @@ -1,19 +1,8 @@ // Package storage provides clients for Microsoft Azure Storage Services. package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. import ( "bufio" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go index e898e9bfaf9..a203fce8d44 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go @@ -1,18 +1,7 @@ package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. import ( "net/url" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go index 056473d49a2..ae2862c8688 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go @@ -1,18 +1,7 @@ package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. import ( "encoding/xml" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go index 151e9a51072..3696e804fe0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go @@ -1,18 +1,7 @@ package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. import ( "errors" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go index 6c0c6caf7d3..498e9837c51 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go @@ -1,18 +1,7 @@ package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. import ( "encoding/xml" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go index dd64eddef7f..9ef63c8dd97 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go @@ -1,18 +1,7 @@ package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. import ( "bytes" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go index 6a480b12a80..9848025ccbe 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go @@ -1,18 +1,7 @@ package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. import ( "errors" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go index 1db8e7da610..6a12d6dcbaa 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go @@ -1,18 +1,7 @@ package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. import ( "encoding/xml" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go index 5b4a65145b6..6453477ba61 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go @@ -1,18 +1,7 @@ package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. import ( "errors" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go index ffc183be619..e5447e4a138 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go @@ -1,18 +1,7 @@ package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. import ( "encoding/xml" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go index 0690e85ad9b..3b057223875 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go @@ -1,18 +1,7 @@ package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // MetadataLevel determines if operations should return a paylod, // and it level of detail. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go index 7ffd63821d7..ff93ec2ac9b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go @@ -1,18 +1,7 @@ package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. import ( "encoding/xml" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go index f90050cb002..7731e4ebc1e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go @@ -1,18 +1,7 @@ package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. import ( "encoding/xml" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go index 28d9ab937e2..ab39f956fb0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go @@ -1,18 +1,7 @@ package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. import ( "errors" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go index 29febe146f6..752701c3bdf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go @@ -1,18 +1,7 @@ package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. // QueueServiceClient contains operations for Microsoft Azure Queue Storage // Service. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go index cf75a265915..30f7c14350a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go @@ -1,18 +1,7 @@ package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. import ( "fmt" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go index 056ab398a81..35d13670cb5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go @@ -1,18 +1,7 @@ package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. import ( "strings" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go index dc41992227c..d139db77653 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go @@ -1,18 +1,7 @@ package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. import ( "net/http" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go index 0febf077f6b..fc8631ee20c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go @@ -1,18 +1,7 @@ package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. import ( "bytes" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go index 5b05e3e2afa..b5aaefe4738 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go @@ -1,18 +1,7 @@ package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. import ( "bytes" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go index 1f063a39520..8eccd5927b5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go @@ -1,18 +1,7 @@ package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. import ( "encoding/json" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go index 39e439fb676..47a871991dc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go @@ -1,18 +1,7 @@ package storage -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. import ( "bytes" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go index baf3c436ca9..b6f2ce92461 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go @@ -18,4 +18,4 @@ package version // Changes may cause incorrect behavior and will be lost if the code is regenerated. // Number contains the semantic version of this SDK. 
-const Number = "v52.5.0" +const Number = "v53.2.0" diff --git a/vendor/github.com/MakeNowJust/heredoc/LICENSE b/vendor/github.com/MakeNowJust/heredoc/LICENSE index 8a58c222086..6d0eb9d5d68 100644 --- a/vendor/github.com/MakeNowJust/heredoc/LICENSE +++ b/vendor/github.com/MakeNowJust/heredoc/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2014-2017 TSUYUSATO Kitsune +Copyright (c) 2014-2019 TSUYUSATO Kitsune Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/MakeNowJust/heredoc/README.md b/vendor/github.com/MakeNowJust/heredoc/README.md index a3a65faba1a..e9924d2974a 100644 --- a/vendor/github.com/MakeNowJust/heredoc/README.md +++ b/vendor/github.com/MakeNowJust/heredoc/README.md @@ -1,4 +1,6 @@ -# heredoc [![CircleCI](https://circleci.com/gh/MakeNowJust/heredoc.svg?style=svg)](https://circleci.com/gh/MakeNowJust/heredoc) [![Go Walker](http://gowalker.org/api/v1/badge)](https://gowalker.org/github.com/MakeNowJust/heredoc) +# heredoc + +[![Build Status](https://circleci.com/gh/MakeNowJust/heredoc.svg?style=svg)](https://circleci.com/gh/MakeNowJust/heredoc) [![GoDoc](https://godoc.org/github.com/MakeNowJusti/heredoc?status.svg)](https://godoc.org/github.com/MakeNowJust/heredoc) ## About @@ -15,8 +17,6 @@ $ go get github.com/MakeNowJust/heredoc ```go // usual import "github.com/MakeNowJust/heredoc" -// shortcuts -import . "github.com/MakeNowJust/heredoc/dot" ``` ## Example @@ -26,11 +26,11 @@ package main import ( "fmt" - . "github.com/MakeNowJust/heredoc/dot" + "github.com/MakeNowJust/heredoc" ) func main() { - fmt.Println(D(` + fmt.Println(heredoc.Doc(` Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, ... @@ -45,8 +45,7 @@ func main() { ## API Document - - [Go Walker - github.com/MakeNowJust/heredoc](https://gowalker.org/github.com/MakeNowJust/heredoc) - - [Go Walker - github.com/MakeNowJust/heredoc/dot](https://gowalker.org/github.com/MakeNowJust/heredoc/dot) + - [heredoc - GoDoc](https://godoc.org/github.com/MakeNowJust/heredoc) ## License diff --git a/vendor/github.com/MakeNowJust/heredoc/go.mod b/vendor/github.com/MakeNowJust/heredoc/go.mod new file mode 100644 index 00000000000..52b0dfd49cb --- /dev/null +++ b/vendor/github.com/MakeNowJust/heredoc/go.mod @@ -0,0 +1,3 @@ +module github.com/MakeNowJust/heredoc + +go 1.12 diff --git a/vendor/github.com/MakeNowJust/heredoc/heredoc.go b/vendor/github.com/MakeNowJust/heredoc/heredoc.go index fea12e622f1..1fc04695551 100644 --- a/vendor/github.com/MakeNowJust/heredoc/heredoc.go +++ b/vendor/github.com/MakeNowJust/heredoc/heredoc.go @@ -1,24 +1,31 @@ -// Copyright (c) 2014-2017 TSUYUSATO Kitsune +// Copyright (c) 2014-2019 TSUYUSATO Kitsune // This software is released under the MIT License. // http://opensource.org/licenses/mit-license.php // Package heredoc provides creation of here-documents from raw strings. // // Golang supports raw-string syntax. +// // doc := ` // Foo // Bar // ` +// // But raw-string cannot recognize indentation. Thus such content is an indented string, equivalent to +// // "\n\tFoo\n\tBar\n" +// // I dont't want this! // // However this problem is solved by package heredoc. 
+// // doc := heredoc.Doc(` // Foo // Bar // `) +// // Is equivalent to +// // "Foo\nBar\n" package heredoc @@ -33,7 +40,7 @@ const maxInt = int(^uint(0) >> 1) // Doc returns un-indented string as here-document. func Doc(raw string) string { skipFirstLine := false - if raw[0] == '\n' { + if len(raw) > 0 && raw[0] == '\n' { raw = raw[1:] } else { skipFirstLine = true diff --git a/vendor/github.com/Microsoft/go-winio/CODEOWNERS b/vendor/github.com/Microsoft/go-winio/CODEOWNERS new file mode 100644 index 00000000000..ae1b4942b91 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/CODEOWNERS @@ -0,0 +1 @@ + * @microsoft/containerplat diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go index ada2fbab632..3ab6bff69c5 100644 --- a/vendor/github.com/Microsoft/go-winio/fileinfo.go +++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go @@ -5,21 +5,14 @@ package winio import ( "os" "runtime" - "syscall" "unsafe" -) - -//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx -//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle -const ( - fileBasicInfo = 0 - fileIDInfo = 0x12 + "golang.org/x/sys/windows" ) // FileBasicInfo contains file access time and file attributes information. type FileBasicInfo struct { - CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime + CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime FileAttributes uint32 pad uint32 // padding } @@ -27,7 +20,7 @@ type FileBasicInfo struct { // GetFileBasicInfo retrieves times and attributes for a file. func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { bi := &FileBasicInfo{} - if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} } runtime.KeepAlive(f) @@ -36,13 +29,32 @@ func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { // SetFileBasicInfo sets times and attributes for a file. func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { - if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + if err := windows.SetFileInformationByHandle(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} } runtime.KeepAlive(f) return nil } +// FileStandardInfo contains extended information for the file. +// FILE_STANDARD_INFO in WinBase.h +// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info +type FileStandardInfo struct { + AllocationSize, EndOfFile int64 + NumberOfLinks uint32 + DeletePending, Directory bool +} + +// GetFileStandardInfo retrieves ended information for the file. 
+func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) { + si := &FileStandardInfo{} + if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileStandardInfo, (*byte)(unsafe.Pointer(si)), uint32(unsafe.Sizeof(*si))); err != nil { + return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return si, nil +} + // FileIDInfo contains the volume serial number and file ID for a file. This pair should be // unique on a system. type FileIDInfo struct { @@ -53,7 +65,7 @@ type FileIDInfo struct { // GetFileID retrieves the unique (volume, file ID) pair for a file. func GetFileID(f *os.File) (*FileIDInfo, error) { fileID := &FileIDInfo{} - if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil { + if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileIdInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil { return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} } runtime.KeepAlive(f) diff --git a/vendor/github.com/Microsoft/go-winio/go.mod b/vendor/github.com/Microsoft/go-winio/go.mod index a2eb6496cf8..98a8dea0e7e 100644 --- a/vendor/github.com/Microsoft/go-winio/go.mod +++ b/vendor/github.com/Microsoft/go-winio/go.mod @@ -4,6 +4,6 @@ go 1.12 require ( github.com/pkg/errors v0.9.1 - github.com/sirupsen/logrus v1.4.1 - golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3 + github.com/sirupsen/logrus v1.7.0 + golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c ) diff --git a/vendor/github.com/Microsoft/go-winio/go.sum b/vendor/github.com/Microsoft/go-winio/go.sum index 6da76a492ee..aa6ad3b571a 100644 --- a/vendor/github.com/Microsoft/go-winio/go.sum +++ b/vendor/github.com/Microsoft/go-winio/go.sum @@ -1,16 +1,14 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3 h1:7TYNF4UdlohbFwpNH04CoPMp1cHUZgO1Ebq5r2hIjfo= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go index dbfe790ee00..b632f8f8bb9 100644 --- a/vendor/github.com/Microsoft/go-winio/hvsock.go +++ b/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -1,3 +1,5 @@ +// +build windows + package winio import ( diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go index 58640657704..f497c0e3917 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go @@ -1,3 +1,5 @@ +// +build windows + // Package guid provides a GUID type. The backing structure for a GUID is // identical to that used by the golang.org/x/sys/windows GUID type. // There are two main binary encodings used for a GUID, the big-endian encoding, diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go index 5cb52bc7462..5955c99fdea 100644 --- a/vendor/github.com/Microsoft/go-winio/syscall.go +++ b/vendor/github.com/Microsoft/go-winio/syscall.go @@ -1,3 +1,3 @@ package winio -//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go index e26b01fafb2..176ff75e320 100644 --- a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -19,6 +19,7 @@ const ( var ( errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL ) // errnoErr returns common boxed Errno values, to prevent @@ -26,7 +27,7 @@ var ( func errnoErr(e syscall.Errno) error { switch e { case 0: - return nil + return errERROR_EINVAL case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } @@ -37,514 +38,382 @@ func errnoErr(e syscall.Errno) error { } var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") modntdll = windows.NewLazySystemDLL("ntdll.dll") - modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") - procCancelIoEx = modkernel32.NewProc("CancelIoEx") - procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") - procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") - procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") - procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") - procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") - procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") - procCreateFileW = 
modkernel32.NewProc("CreateFileW") - procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") - procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") - procLocalAlloc = modkernel32.NewProc("LocalAlloc") - procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") - procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") - procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") - procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") - procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") - procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") - procLocalFree = modkernel32.NewProc("LocalFree") procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") - procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") - procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") - procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") - procRevertToSelf = modadvapi32.NewProc("RevertToSelf") - procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") - procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") - procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") - procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") + procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") procBackupRead = modkernel32.NewProc("BackupRead") procBackupWrite = modkernel32.NewProc("BackupWrite") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + procCreateFileW = modkernel32.NewProc("CreateFileW") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") + procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procLocalFree = modkernel32.NewProc("LocalFree") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") + procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") + procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") + procRtlNtStatusToDosErrorNoTeb = 
modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") + procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") procbind = modws2_32.NewProc("bind") ) -func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { + var _p0 uint32 + if releaseAll { + _p0 = 1 } - return -} - -func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) - newport = syscall.Handle(r0) - if newport == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) + success = r0 != 0 + if true { + err = errnoErr(e1) } return } -func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) +func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0) +func convertSidToStringSid(sid *byte, str **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { - var _p0 uint32 - if wait { - _p0 = 1 - } else { - _p0 = 0 +func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(str) + if err != nil { + return } - r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) +} + +func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { + r1, _, e1 := 
syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) +func getSecurityDescriptorLength(sd uintptr) (len uint32) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) + len = uint32(r0) + return +} + +func impersonateSelf(level uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { +func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) + _p0, err = syscall.UTF16PtrFromString(accountName) if err != nil { return } - return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) + return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) } -func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { +func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) + _p0, err = syscall.UTF16PtrFromString(systemName) if err != nil { return } - return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) + return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) } -func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile 
syscall.Handle) (handle syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return } - return + return _lookupPrivilegeName(_p0, luid, buffer, size) } -func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) +func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { - r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) - ptr = uintptr(r0) - return +func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + var _p1 *uint16 + _p1, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _lookupPrivilegeValue(_p0, _p1, luid) } -func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) { - r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), 
uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) - status = ntstatus(r0) +func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + if r1 == 0 { + err = errnoErr(e1) + } return } -func rtlNtStatusToDosError(status ntstatus) (winerr error) { - r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) - if r0 != 0 { - winerr = syscall.Errno(r0) +func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { + var _p0 uint32 + if openAsSelf { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) { - r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) - status = ntstatus(r0) +func revertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) { - r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) - status = ntstatus(r0) +func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } + r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(accountName) - if err != nil { - return +func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] } - return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) + var _p1 uint32 + if abort { + _p1 = 1 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } + r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return } -func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), 
uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) +func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func convertSidToStringSid(sid *byte, str **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) +func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { +func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(str) + _p0, err = syscall.UTF16PtrFromString(name) if err != nil { return } - return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) + return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) } -func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = errnoErr(e1) } return } -func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) + newport = syscall.Handle(r0) + if newport == 0 { + err = errnoErr(e1) } return } -func localFree(mem uintptr) { - syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) - return -} - -func getSecurityDescriptorLength(sd uintptr) (len uint32) { - r0, _, _ := 
syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) - len = uint32(r0) - return -} - -func getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return } - return + return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) } -func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = errnoErr(e1) } return } -func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { - var _p0 uint32 - if releaseAll { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) - success = r0 != 0 - if true { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func getCurrentThread() (h syscall.Handle) { + r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) + h = syscall.Handle(r0) return } -func impersonateSelf(level uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) +func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func revertToSelf() (err error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) +func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { + r1, _, e1 := 
syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { - var _p0 uint32 - if openAsSelf { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) +func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func getCurrentThread() (h syscall.Handle) { - r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) - h = syscall.Handle(r0) +func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { + r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) + ptr = uintptr(r0) return } -func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - var _p1 *uint16 - _p1, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _lookupPrivilegeValue(_p0, _p1, luid) +func localFree(mem uintptr) { + syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) + return } -func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) +func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - return _lookupPrivilegeName(_p0, luid, buffer, size) -} - -func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) { + r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), 
uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) + status = ntstatus(r0) return } -func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) +func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) { + r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) + status = ntstatus(r0) + return } -func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) { + r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) + status = ntstatus(r0) return } -func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { - var _p0 *byte - if len(b) > 0 { - _p0 = &b[0] - } - var _p1 uint32 - if abort { - _p1 = 1 - } else { - _p1 = 0 - } - var _p2 uint32 - if processSecurity { - _p2 = 1 - } else { - _p2 = 0 - } - r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func rtlNtStatusToDosError(status ntstatus) (winerr error) { + r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) + if r0 != 0 { + winerr = syscall.Errno(r0) } return } -func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { - var _p0 *byte - if len(b) > 0 { - _p0 = &b[0] - } - var _p1 uint32 - if abort { - _p1 = 1 - } else { - _p1 = 0 - } - var _p2 uint32 - if processSecurity { - _p2 = 1 - } else { - _p2 = 0 +func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 } - r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } @@ -552,11 +421,7 @@ func backupWrite(h syscall.Handle, b []byte, 
bytesWritten *uint32, abort bool, p func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) { r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) if r1 == socketError { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go index e42c5cdbb2e..260a37cbbab 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -95,7 +95,7 @@ import ( // StdinTokenProvider will prompt on stderr and read from stdin for a string value. // An error is returned if reading from stdin fails. // -// Use this function go read MFA tokens from stdin. The function makes no attempt +// Use this function to read MFA tokens from stdin. The function makes no attempt // to make atomic prompts from stdin across multiple gorouties. // // Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index f472e558c6f..c102146b3ff 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -21,6 +21,7 @@ const ( ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong). ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). + ApNortheast3RegionID = "ap-northeast-3" // Asia Pacific (Osaka). ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). 
@@ -121,6 +122,9 @@ var awsPartition = partition{ "ap-northeast-2": region{ Description: "Asia Pacific (Seoul)", }, + "ap-northeast-3": region{ + Description: "Asia Pacific (Osaka)", + }, "ap-south-1": region{ Description: "Asia Pacific (Mumbai)", }, @@ -184,6 +188,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -239,6 +244,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -361,12 +367,13 @@ var awsPartition = partition{ "amplifybackend": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, - "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, @@ -452,6 +459,12 @@ var awsPartition = partition{ Region: "ap-northeast-2", }, }, + "ap-northeast-3": endpoint{ + Hostname: "api.ecr.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, "ap-south-1": endpoint{ Hostname: "api.ecr.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ @@ -706,6 +719,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -730,6 +744,7 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, @@ -765,6 +780,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -915,6 +931,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -967,6 +984,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -988,9 +1006,11 @@ var awsPartition = partition{ "batch": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -1120,6 +1140,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -1385,6 +1406,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, 
"ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -1708,6 +1730,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -1756,6 +1779,7 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, @@ -1765,7 +1789,11 @@ var awsPartition = partition{ "contact-lens": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -1902,6 +1930,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -1963,6 +1992,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2140,6 +2170,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2202,6 +2233,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2259,6 +2291,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2325,6 +2358,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2376,6 +2410,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2425,6 +2460,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2456,6 +2492,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2505,6 +2542,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2539,6 +2577,12 @@ var awsPartition = partition{ Region: "ap-northeast-2", }, }, + "fips-ap-northeast-3": endpoint{ + Hostname: 
"elasticfilesystem-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, "fips-ap-south-1": endpoint{ Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ @@ -2652,6 +2696,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2704,6 +2749,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2783,9 +2829,19 @@ var awsPartition = partition{ "emr-containers": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "entitlement.marketplace": service{ @@ -2805,6 +2861,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2836,6 +2893,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2885,6 +2943,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -3181,6 +3240,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -3236,6 +3296,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -3302,8 +3363,15 @@ var awsPartition = partition{ Endpoints: endpoints{ "af-south-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "groundstation-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, "fips-us-east-2": endpoint{ Hostname: "groundstation-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ @@ -3317,6 +3385,7 @@ var awsPartition = partition{ }, }, "me-south-1": endpoint{}, + "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, @@ -3331,6 +3400,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ 
-3696,6 +3766,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -3789,6 +3860,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -3847,11 +3919,12 @@ var awsPartition = partition{ Region: "us-west-2", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "lambda": service{ @@ -3861,6 +3934,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -3910,6 +3984,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -3977,6 +4052,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -4019,6 +4095,14 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "lookoutequipment": service{ + + Endpoints: endpoints{ + "ap-northeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, "lookoutvision": service{ Endpoints: endpoints{ @@ -4064,6 +4148,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -4276,6 +4361,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -4350,6 +4436,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -4398,6 +4485,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -4754,6 +4842,22 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "personalize": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "pinpoint": service{ Defaults: endpoint{ CredentialScope: credentialScope{ @@ -4957,12 +5061,42 @@ var 
awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ram-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ram-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ram-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ram-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ram-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "rds": service{ @@ -4972,6 +5106,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -5029,6 +5164,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -5132,6 +5268,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -5327,6 +5464,90 @@ var awsPartition = partition{ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ + "accesspoint-af-south-1": endpoint{ + Hostname: "s3-accesspoint.af-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-east-1": endpoint{ + Hostname: "s3-accesspoint.ap-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-northeast-1": endpoint{ + Hostname: "s3-accesspoint.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-northeast-2": endpoint{ + Hostname: "s3-accesspoint.ap-northeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-northeast-3": endpoint{ + Hostname: "s3-accesspoint.ap-northeast-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-south-1": endpoint{ + Hostname: "s3-accesspoint.ap-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-southeast-1": endpoint{ + Hostname: "s3-accesspoint.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-southeast-2": endpoint{ + Hostname: "s3-accesspoint.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ca-central-1": endpoint{ + Hostname: "s3-accesspoint.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-eu-central-1": endpoint{ + Hostname: "s3-accesspoint.eu-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-eu-north-1": endpoint{ + 
Hostname: "s3-accesspoint.eu-north-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-eu-south-1": endpoint{ + Hostname: "s3-accesspoint.eu-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-eu-west-1": endpoint{ + Hostname: "s3-accesspoint.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-eu-west-2": endpoint{ + Hostname: "s3-accesspoint.eu-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-eu-west-3": endpoint{ + Hostname: "s3-accesspoint.eu-west-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-me-south-1": endpoint{ + Hostname: "s3-accesspoint.me-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-sa-east-1": endpoint{ + Hostname: "s3-accesspoint.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-us-east-1": endpoint{ + Hostname: "s3-accesspoint.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-us-east-2": endpoint{ + Hostname: "s3-accesspoint.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-us-west-1": endpoint{ + Hostname: "s3-accesspoint.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-us-west-2": endpoint{ + Hostname: "s3-accesspoint.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{ @@ -5334,6 +5555,7 @@ var awsPartition = partition{ SignatureVersions: []string{"s3", "s3v4"}, }, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{ Hostname: "s3.ap-southeast-1.amazonaws.com", @@ -5358,8 +5580,28 @@ var awsPartition = partition{ Hostname: "s3.eu-west-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-accesspoint-ca-central-1": endpoint{ + Hostname: "s3-accesspoint-fips.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "fips-accesspoint-us-east-1": endpoint{ + Hostname: "s3-accesspoint-fips.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "fips-accesspoint-us-east-2": endpoint{ + Hostname: "s3-accesspoint-fips.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "fips-accesspoint-us-west-1": endpoint{ + Hostname: "s3-accesspoint-fips.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "fips-accesspoint-us-west-2": endpoint{ + Hostname: "s3-accesspoint-fips.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, "me-south-1": endpoint{}, "s3-external-1": endpoint{ Hostname: "s3-external-1.amazonaws.com", @@ -5410,6 +5652,13 @@ var awsPartition = partition{ Region: "ap-northeast-2", }, }, + "ap-northeast-3": endpoint{ + Hostname: "s3-control.ap-northeast-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, "ap-south-1": endpoint{ Hostname: "s3-control.ap-south-1.amazonaws.com", SignatureVersions: []string{"s3v4"}, @@ -5605,6 +5854,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -5654,6 +5904,7 @@ var awsPartition = 
partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -5846,6 +6097,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -5956,6 +6208,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6078,6 +6331,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6130,6 +6384,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6166,12 +6421,10 @@ var awsPartition = partition{ }, "me-south-1": endpoint{}, "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "queue.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "ssm": service{ @@ -6181,6 +6434,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6236,6 +6490,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6381,6 +6636,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6448,6 +6704,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6497,6 +6754,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6582,6 +6840,8 @@ var awsPartition = partition{ "transfer": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -6623,11 +6883,12 @@ var awsPartition = partition{ Region: "us-west-2", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "translate": service{ @@ -6973,6 
+7234,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, @@ -7003,6 +7265,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -7573,7 +7836,8 @@ var awscnPartition = partition{ "lakeformation": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "lambda": service{ @@ -7617,6 +7881,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "mq": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "neptune": service{ Endpoints: endpoints{ @@ -7641,6 +7912,12 @@ var awscnPartition = partition{ }, }, }, + "personalize": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "polly": service{ Endpoints: endpoints{ @@ -7704,6 +7981,14 @@ var awscnPartition = partition{ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ + "accesspoint-cn-north-1": endpoint{ + Hostname: "s3-accesspoint.cn-north-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-cn-northwest-1": endpoint{ + Hostname: "s3-accesspoint.cn-northwest-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + }, "cn-north-1": endpoint{}, "cn-northwest-1": endpoint{}, }, @@ -7997,6 +8282,27 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "api.detective": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "api.detective-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "api.detective-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "api.ecr": service{ Endpoints: endpoints{ @@ -8150,18 +8456,6 @@ var awsusgovPartition = partition{ "batch": service{ Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "batch.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "batch.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -8686,6 +8980,27 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "fms": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "fms-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "fms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "fsx": service{ Endpoints: endpoints{ @@ -9126,8 +9441,18 @@ var awsusgovPartition = partition{ "ram": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + 
"us-gov-east-1": endpoint{ + Hostname: "ram.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "ram.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "rds": service{ @@ -9259,6 +9584,22 @@ var awsusgovPartition = partition{ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ + "accesspoint-us-gov-east-1": endpoint{ + Hostname: "s3-accesspoint.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-us-gov-west-1": endpoint{ + Hostname: "s3-accesspoint.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "fips-accesspoint-us-gov-east-1": endpoint{ + Hostname: "s3-accesspoint-fips.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "fips-accesspoint-us-gov-west-1": endpoint{ + Hostname: "s3-accesspoint-fips.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, "fips-us-gov-west-1": endpoint{ Hostname: "s3-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ @@ -9392,6 +9733,27 @@ var awsusgovPartition = partition{ }, }, }, + "servicequotas": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "servicequotas.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "servicequotas.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "sms": service{ Endpoints: endpoints{ @@ -10381,6 +10743,19 @@ var awsisobPartition = partition{ "us-isob-east-1": endpoint{}, }, }, + "route53": service{ + PartitionEndpoint: "aws-iso-b-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-b-global": endpoint{ + Hostname: "route53.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, "s3": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index d71f7b3f4fa..1737c2686de 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -689,9 +689,12 @@ func (ctx *signingCtx) buildBodyDigest() error { if hash == "" { includeSHA256Header := ctx.unsignedPayload || ctx.ServiceName == "s3" || + ctx.ServiceName == "s3-object-lambda" || ctx.ServiceName == "glacier" - s3Presign := ctx.isPresign && ctx.ServiceName == "s3" + s3Presign := ctx.isPresign && + (ctx.ServiceName == "s3" || + ctx.ServiceName == "s3-object-lambda") if ctx.unsignedPayload || s3Presign { hash = "UNSIGNED-PAYLOAD" diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 1022b01df20..d3f10497561 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.37.12" +const SDKVersion = "1.38.20" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go index 7a8e46fbdae..3079e4ab0e2 100644 --- 
a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go @@ -7,6 +7,21 @@ import ( "github.com/aws/aws-sdk-go/aws/arn" ) +var supportedServiceARN = []string{ + "s3", + "s3-outposts", + "s3-object-lambda", +} + +func isSupportedServiceARN(service string) bool { + for _, name := range supportedServiceARN { + if name == service { + return true + } + } + return false +} + // Resource provides the interfaces abstracting ARNs of specific resource // types. type Resource interface { @@ -29,9 +44,10 @@ func ParseResource(s string, resParser ResourceParser) (resARN Resource, err err return nil, InvalidARNError{ARN: a, Reason: "partition not set"} } - if a.Service != "s3" && a.Service != "s3-outposts" { + if !isSupportedServiceARN(a.Service) { return nil, InvalidARNError{ARN: a, Reason: "service is not supported"} } + if len(a.Resource) == 0 { return nil, InvalidARNError{ARN: a, Reason: "resource not set"} } diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go new file mode 100644 index 00000000000..513154cc0e3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go @@ -0,0 +1,15 @@ +package arn + +// S3ObjectLambdaARN represents an ARN for the s3-object-lambda service +type S3ObjectLambdaARN interface { + Resource + + isS3ObjectLambdasARN() +} + +// S3ObjectLambdaAccessPointARN is an S3ObjectLambdaARN for the Access Point resource type +type S3ObjectLambdaAccessPointARN struct { + AccessPointARN +} + +func (s S3ObjectLambdaAccessPointARN) isS3ObjectLambdasARN() {} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index 47d41ee5a39..905092b9730 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -1314,18 +1314,18 @@ func (c *EC2) AssociateEnclaveCertificateIamRoleRequest(input *AssociateEnclaveC // see AWS Certificate Manager for Nitro Enclaves (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave-refapp.html) // in the AWS Nitro Enclaves User Guide. // -// When the IAM role is associated with the ACM certificate, places the certificate, -// certificate chain, and encrypted private key in an Amazon S3 bucket that -// only the associated IAM role can access. The private key of the certificate +// When the IAM role is associated with the ACM certificate, the certificate, +// certificate chain, and encrypted private key are placed in an Amazon S3 bucket +// that only the associated IAM role can access. The private key of the certificate // is encrypted with an AWS-managed KMS customer master (CMK) that has an attached // attestation-based CMK policy. // // To enable the IAM role to access the Amazon S3 object, you must grant it // permission to call s3:GetObject on the Amazon S3 bucket returned by the command. // To enable the IAM role to access the AWS KMS CMK, you must grant it permission -// to call kms:Decrypt on AWS KMS CMK returned by the command. For more information, -// see Grant the role permission to access the certificate and encryption key -// (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave-refapp.html#add-policy) +// to call kms:Decrypt on the AWS KMS CMK returned by the command. 
For more +// information, see Grant the role permission to access the certificate and +// encryption key (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave-refapp.html#add-policy) // in the AWS Nitro Enclaves User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3399,7 +3399,8 @@ func (c *EC2) CopyImageRequest(input *CopyImageInput) (req *request.Request, out // // Initiates the copy of an AMI. You can copy an AMI from one Region to another, // or from a Region to an AWS Outpost. You can't copy an AMI from an Outpost -// to a Region, from one Outpost to another, or within the same Outpost. +// to a Region, from one Outpost to another, or within the same Outpost. To +// copy an AMI to another partition, see CreateStoreImageTask (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateStoreImageTask.html). // // To copy an AMI from one Region to another, specify the source Region using // the SourceRegion parameter, and specify the destination Region using its @@ -4711,7 +4712,7 @@ func (c *EC2) CreateInstanceExportTaskRequest(input *CreateInstanceExportTaskInp // // For information about the supported operating systems, image formats, and // known limitations for the types of instances you can export, see Exporting -// an Instance as a VM Using VM Import/Export (https://docs.aws.amazon.com/vm-import/latest/userguide/vmexport.html) +// an instance as a VM Using VM Import/Export (https://docs.aws.amazon.com/vm-import/latest/userguide/vmexport.html) // in the VM Import/Export User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5859,6 +5860,85 @@ func (c *EC2) CreatePlacementGroupWithContext(ctx aws.Context, input *CreatePlac return out, req.Send() } +const opCreateReplaceRootVolumeTask = "CreateReplaceRootVolumeTask" + +// CreateReplaceRootVolumeTaskRequest generates a "aws/request.Request" representing the +// client's request for the CreateReplaceRootVolumeTask operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateReplaceRootVolumeTask for more information on using the CreateReplaceRootVolumeTask +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateReplaceRootVolumeTaskRequest method. +// req, resp := client.CreateReplaceRootVolumeTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateReplaceRootVolumeTask +func (c *EC2) CreateReplaceRootVolumeTaskRequest(input *CreateReplaceRootVolumeTaskInput) (req *request.Request, output *CreateReplaceRootVolumeTaskOutput) { + op := &request.Operation{ + Name: opCreateReplaceRootVolumeTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateReplaceRootVolumeTaskInput{} + } + + output = &CreateReplaceRootVolumeTaskOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateReplaceRootVolumeTask API operation for Amazon Elastic Compute Cloud. 
+// +// Creates a root volume replacement task for an Amazon EC2 instance. The root +// volume can either be restored to its initial launch state, or it can be restored +// using a specific snapshot. +// +// For more information, see Replace a root volume (https://docs.aws.amazon.com/) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateReplaceRootVolumeTask for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateReplaceRootVolumeTask +func (c *EC2) CreateReplaceRootVolumeTask(input *CreateReplaceRootVolumeTaskInput) (*CreateReplaceRootVolumeTaskOutput, error) { + req, out := c.CreateReplaceRootVolumeTaskRequest(input) + return out, req.Send() +} + +// CreateReplaceRootVolumeTaskWithContext is the same as CreateReplaceRootVolumeTask with the addition of +// the ability to pass a context and additional request options. +// +// See CreateReplaceRootVolumeTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateReplaceRootVolumeTaskWithContext(ctx aws.Context, input *CreateReplaceRootVolumeTaskInput, opts ...request.Option) (*CreateReplaceRootVolumeTaskOutput, error) { + req, out := c.CreateReplaceRootVolumeTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateReservedInstancesListing = "CreateReservedInstancesListing" // CreateReservedInstancesListingRequest generates a "aws/request.Request" representing the @@ -5955,6 +6035,88 @@ func (c *EC2) CreateReservedInstancesListingWithContext(ctx aws.Context, input * return out, req.Send() } +const opCreateRestoreImageTask = "CreateRestoreImageTask" + +// CreateRestoreImageTaskRequest generates a "aws/request.Request" representing the +// client's request for the CreateRestoreImageTask operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateRestoreImageTask for more information on using the CreateRestoreImageTask +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateRestoreImageTaskRequest method. 
+// req, resp := client.CreateRestoreImageTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateRestoreImageTask +func (c *EC2) CreateRestoreImageTaskRequest(input *CreateRestoreImageTaskInput) (req *request.Request, output *CreateRestoreImageTaskOutput) { + op := &request.Operation{ + Name: opCreateRestoreImageTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateRestoreImageTaskInput{} + } + + output = &CreateRestoreImageTaskOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateRestoreImageTask API operation for Amazon Elastic Compute Cloud. +// +// Starts a task that restores an AMI from an S3 object that was previously +// created by using CreateStoreImageTask (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateStoreImageTask.html). +// +// To use this API, you must have the required permissions. For more information, +// see Permissions for storing and restoring AMIs using S3 (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html#ami-s3-permissions) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For more information, see Store and restore an AMI using S3 (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateRestoreImageTask for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateRestoreImageTask +func (c *EC2) CreateRestoreImageTask(input *CreateRestoreImageTaskInput) (*CreateRestoreImageTaskOutput, error) { + req, out := c.CreateRestoreImageTaskRequest(input) + return out, req.Send() +} + +// CreateRestoreImageTaskWithContext is the same as CreateRestoreImageTask with the addition of +// the ability to pass a context and additional request options. +// +// See CreateRestoreImageTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateRestoreImageTaskWithContext(ctx aws.Context, input *CreateRestoreImageTaskInput, opts ...request.Option) (*CreateRestoreImageTaskOutput, error) { + req, out := c.CreateRestoreImageTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateRoute = "CreateRoute" // CreateRouteRequest generates a "aws/request.Request" representing the @@ -6496,6 +6658,87 @@ func (c *EC2) CreateSpotDatafeedSubscriptionWithContext(ctx aws.Context, input * return out, req.Send() } +const opCreateStoreImageTask = "CreateStoreImageTask" + +// CreateStoreImageTaskRequest generates a "aws/request.Request" representing the +// client's request for the CreateStoreImageTask operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See CreateStoreImageTask for more information on using the CreateStoreImageTask +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateStoreImageTaskRequest method. +// req, resp := client.CreateStoreImageTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateStoreImageTask +func (c *EC2) CreateStoreImageTaskRequest(input *CreateStoreImageTaskInput) (req *request.Request, output *CreateStoreImageTaskOutput) { + op := &request.Operation{ + Name: opCreateStoreImageTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateStoreImageTaskInput{} + } + + output = &CreateStoreImageTaskOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateStoreImageTask API operation for Amazon Elastic Compute Cloud. +// +// Stores an AMI as a single object in an S3 bucket. +// +// To use this API, you must have the required permissions. For more information, +// see Permissions for storing and restoring AMIs using S3 (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html#ami-s3-permissions) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For more information, see Store and restore an AMI using S3 (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateStoreImageTask for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateStoreImageTask +func (c *EC2) CreateStoreImageTask(input *CreateStoreImageTaskInput) (*CreateStoreImageTaskOutput, error) { + req, out := c.CreateStoreImageTaskRequest(input) + return out, req.Send() +} + +// CreateStoreImageTaskWithContext is the same as CreateStoreImageTask with the addition of +// the ability to pass a context and additional request options. +// +// See CreateStoreImageTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateStoreImageTaskWithContext(ctx aws.Context, input *CreateStoreImageTaskInput, opts ...request.Option) (*CreateStoreImageTaskOutput, error) { + req, out := c.CreateStoreImageTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opCreateSubnet = "CreateSubnet" // CreateSubnetRequest generates a "aws/request.Request" representing the @@ -21552,6 +21795,140 @@ func (c *EC2) DescribeRegionsWithContext(ctx aws.Context, input *DescribeRegions return out, req.Send() } +const opDescribeReplaceRootVolumeTasks = "DescribeReplaceRootVolumeTasks" + +// DescribeReplaceRootVolumeTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReplaceRootVolumeTasks operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeReplaceRootVolumeTasks for more information on using the DescribeReplaceRootVolumeTasks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeReplaceRootVolumeTasksRequest method. +// req, resp := client.DescribeReplaceRootVolumeTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeReplaceRootVolumeTasks +func (c *EC2) DescribeReplaceRootVolumeTasksRequest(input *DescribeReplaceRootVolumeTasksInput) (req *request.Request, output *DescribeReplaceRootVolumeTasksOutput) { + op := &request.Operation{ + Name: opDescribeReplaceRootVolumeTasks, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReplaceRootVolumeTasksInput{} + } + + output = &DescribeReplaceRootVolumeTasksOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeReplaceRootVolumeTasks API operation for Amazon Elastic Compute Cloud. +// +// Describes a root volume replacement task. For more information, see Replace +// a root volume (https://docs.aws.amazon.com/) in the Amazon Elastic Compute +// Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeReplaceRootVolumeTasks for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeReplaceRootVolumeTasks +func (c *EC2) DescribeReplaceRootVolumeTasks(input *DescribeReplaceRootVolumeTasksInput) (*DescribeReplaceRootVolumeTasksOutput, error) { + req, out := c.DescribeReplaceRootVolumeTasksRequest(input) + return out, req.Send() +} + +// DescribeReplaceRootVolumeTasksWithContext is the same as DescribeReplaceRootVolumeTasks with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeReplaceRootVolumeTasks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeReplaceRootVolumeTasksWithContext(ctx aws.Context, input *DescribeReplaceRootVolumeTasksInput, opts ...request.Option) (*DescribeReplaceRootVolumeTasksOutput, error) { + req, out := c.DescribeReplaceRootVolumeTasksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeReplaceRootVolumeTasksPages iterates over the pages of a DescribeReplaceRootVolumeTasks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReplaceRootVolumeTasks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReplaceRootVolumeTasks operation. +// pageNum := 0 +// err := client.DescribeReplaceRootVolumeTasksPages(params, +// func(page *ec2.DescribeReplaceRootVolumeTasksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeReplaceRootVolumeTasksPages(input *DescribeReplaceRootVolumeTasksInput, fn func(*DescribeReplaceRootVolumeTasksOutput, bool) bool) error { + return c.DescribeReplaceRootVolumeTasksPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeReplaceRootVolumeTasksPagesWithContext same as DescribeReplaceRootVolumeTasksPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeReplaceRootVolumeTasksPagesWithContext(ctx aws.Context, input *DescribeReplaceRootVolumeTasksInput, fn func(*DescribeReplaceRootVolumeTasksOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeReplaceRootVolumeTasksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeReplaceRootVolumeTasksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeReplaceRootVolumeTasksOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeReservedInstances = "DescribeReservedInstances" // DescribeReservedInstancesRequest generates a "aws/request.Request" representing the @@ -23456,10 +23833,10 @@ func (c *EC2) DescribeSpotPriceHistoryRequest(input *DescribeSpotPriceHistoryInp // pricing history (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances-history.html) // in the Amazon EC2 User Guide for Linux Instances. // -// When you specify a start and end time, this operation returns the prices -// of the instance types within the time range that you specified and the time -// when the price changed. The price is valid within the time period that you -// specified; the response merely indicates the last time that the price changed. +// When you specify a start and end time, the operation returns the prices of +// the instance types within that time range. It also returns the last price +// change before the start time, which is the effective price as of the start +// time. 
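// For example, if the price for an instance type changed at 01:00 and again at
// 03:00, and you request the range 02:00-04:00, the response includes the 03:00
// change and the 01:00 change (the last change before the start time, which is
// the price in effect at 02:00).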
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -23676,6 +24053,154 @@ func (c *EC2) DescribeStaleSecurityGroupsPagesWithContext(ctx aws.Context, input return p.Err() } +const opDescribeStoreImageTasks = "DescribeStoreImageTasks" + +// DescribeStoreImageTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStoreImageTasks operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeStoreImageTasks for more information on using the DescribeStoreImageTasks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeStoreImageTasksRequest method. +// req, resp := client.DescribeStoreImageTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeStoreImageTasks +func (c *EC2) DescribeStoreImageTasksRequest(input *DescribeStoreImageTasksInput) (req *request.Request, output *DescribeStoreImageTasksOutput) { + op := &request.Operation{ + Name: opDescribeStoreImageTasks, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeStoreImageTasksInput{} + } + + output = &DescribeStoreImageTasksOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeStoreImageTasks API operation for Amazon Elastic Compute Cloud. +// +// Describes the progress of the AMI store tasks. You can describe the store +// tasks for specified AMIs. If you don't specify the AMIs, you get a paginated +// list of store tasks from the last 31 days. +// +// For each AMI task, the response indicates if the task is InProgress, Completed, +// or Failed. For tasks InProgress, the response shows the estimated progress +// as a percentage. +// +// Tasks are listed in reverse chronological order. Currently, only tasks from +// the past 31 days can be viewed. +// +// To use this API, you must have the required permissions. For more information, +// see Permissions for storing and restoring AMIs using S3 (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html#ami-s3-permissions) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For more information, see Store and restore an AMI using S3 (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeStoreImageTasks for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeStoreImageTasks +func (c *EC2) DescribeStoreImageTasks(input *DescribeStoreImageTasksInput) (*DescribeStoreImageTasksOutput, error) { + req, out := c.DescribeStoreImageTasksRequest(input) + return out, req.Send() +} + +// DescribeStoreImageTasksWithContext is the same as DescribeStoreImageTasks with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeStoreImageTasks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeStoreImageTasksWithContext(ctx aws.Context, input *DescribeStoreImageTasksInput, opts ...request.Option) (*DescribeStoreImageTasksOutput, error) { + req, out := c.DescribeStoreImageTasksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeStoreImageTasksPages iterates over the pages of a DescribeStoreImageTasks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeStoreImageTasks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeStoreImageTasks operation. +// pageNum := 0 +// err := client.DescribeStoreImageTasksPages(params, +// func(page *ec2.DescribeStoreImageTasksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeStoreImageTasksPages(input *DescribeStoreImageTasksInput, fn func(*DescribeStoreImageTasksOutput, bool) bool) error { + return c.DescribeStoreImageTasksPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeStoreImageTasksPagesWithContext same as DescribeStoreImageTasksPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeStoreImageTasksPagesWithContext(ctx aws.Context, input *DescribeStoreImageTasksInput, fn func(*DescribeStoreImageTasksOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeStoreImageTasksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeStoreImageTasksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
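// A paginated-usage sketch for this operation (illustrative; with a zero-value
// input the call lists store tasks from the past 31 days, as described above):
//
//    svc := ec2.New(session.Must(session.NewSession()))
//    err := svc.DescribeStoreImageTasksPages(&ec2.DescribeStoreImageTasksInput{},
//        func(page *ec2.DescribeStoreImageTasksOutput, lastPage bool) bool {
//            fmt.Println(page) // each page holds a batch of store task results
//            return !lastPage  // keep iterating until the final page
//        })
//    if err != nil {
//        log.Fatal(err)
//    }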
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeStoreImageTasksOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeSubnets = "DescribeSubnets" // DescribeSubnetsRequest generates a "aws/request.Request" representing the @@ -27939,6 +28464,84 @@ func (c *EC2) DisableFastSnapshotRestoresWithContext(ctx aws.Context, input *Dis return out, req.Send() } +const opDisableSerialConsoleAccess = "DisableSerialConsoleAccess" + +// DisableSerialConsoleAccessRequest generates a "aws/request.Request" representing the +// client's request for the DisableSerialConsoleAccess operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisableSerialConsoleAccess for more information on using the DisableSerialConsoleAccess +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisableSerialConsoleAccessRequest method. +// req, resp := client.DisableSerialConsoleAccessRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableSerialConsoleAccess +func (c *EC2) DisableSerialConsoleAccessRequest(input *DisableSerialConsoleAccessInput) (req *request.Request, output *DisableSerialConsoleAccessOutput) { + op := &request.Operation{ + Name: opDisableSerialConsoleAccess, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableSerialConsoleAccessInput{} + } + + output = &DisableSerialConsoleAccessOutput{} + req = c.newRequest(op, input, output) + return +} + +// DisableSerialConsoleAccess API operation for Amazon Elastic Compute Cloud. +// +// Disables access to the EC2 serial console of all instances for your account. +// By default, access to the EC2 serial console is disabled for your account. +// For more information, see Manage account access to the EC2 serial console +// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configure-access-to-serial-console.html#serial-console-account-access) +// in the Amazon EC2 User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DisableSerialConsoleAccess for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableSerialConsoleAccess +func (c *EC2) DisableSerialConsoleAccess(input *DisableSerialConsoleAccessInput) (*DisableSerialConsoleAccessOutput, error) { + req, out := c.DisableSerialConsoleAccessRequest(input) + return out, req.Send() +} + +// DisableSerialConsoleAccessWithContext is the same as DisableSerialConsoleAccess with the addition of +// the ability to pass a context and additional request options. +// +// See DisableSerialConsoleAccess for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisableSerialConsoleAccessWithContext(ctx aws.Context, input *DisableSerialConsoleAccessInput, opts ...request.Option) (*DisableSerialConsoleAccessOutput, error) { + req, out := c.DisableSerialConsoleAccessRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDisableTransitGatewayRouteTablePropagation = "DisableTransitGatewayRouteTablePropagation" // DisableTransitGatewayRouteTablePropagationRequest generates a "aws/request.Request" representing the @@ -28999,7 +29602,7 @@ func (c *EC2) EnableEbsEncryptionByDefaultRequest(input *EnableEbsEncryptionByDe // Enables EBS encryption by default for your account in the current Region. // // After you enable encryption by default, the EBS volumes that you create are -// are always encrypted, either using the default CMK or the CMK that you specified +// always encrypted, either using the default CMK or the CMK that you specified // when you created each volume. For more information, see Amazon EBS encryption // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) // in the Amazon Elastic Compute Cloud User Guide. @@ -29124,6 +29727,84 @@ func (c *EC2) EnableFastSnapshotRestoresWithContext(ctx aws.Context, input *Enab return out, req.Send() } +const opEnableSerialConsoleAccess = "EnableSerialConsoleAccess" + +// EnableSerialConsoleAccessRequest generates a "aws/request.Request" representing the +// client's request for the EnableSerialConsoleAccess operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See EnableSerialConsoleAccess for more information on using the EnableSerialConsoleAccess +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the EnableSerialConsoleAccessRequest method. +// req, resp := client.EnableSerialConsoleAccessRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableSerialConsoleAccess +func (c *EC2) EnableSerialConsoleAccessRequest(input *EnableSerialConsoleAccessInput) (req *request.Request, output *EnableSerialConsoleAccessOutput) { + op := &request.Operation{ + Name: opEnableSerialConsoleAccess, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableSerialConsoleAccessInput{} + } + + output = &EnableSerialConsoleAccessOutput{} + req = c.newRequest(op, input, output) + return +} + +// EnableSerialConsoleAccess API operation for Amazon Elastic Compute Cloud. +// +// Enables access to the EC2 serial console of all instances for your account. +// By default, access to the EC2 serial console is disabled for your account. +// For more information, see Manage account access to the EC2 serial console +// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configure-access-to-serial-console.html#serial-console-account-access) +// in the Amazon EC2 User Guide. 
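// A minimal sketch of toggling and then reading back this account-level setting
// (illustrative; zero-value inputs are used for brevity, GetSerialConsoleAccessStatus
// is added later in this change, and svc is an *ec2.EC2 client as in the earlier
// sketches):
//
//    if _, err := svc.EnableSerialConsoleAccess(&ec2.EnableSerialConsoleAccessInput{}); err != nil {
//        log.Fatal(err)
//    }
//    status, err := svc.GetSerialConsoleAccessStatus(&ec2.GetSerialConsoleAccessStatusInput{})
//    if err != nil {
//        log.Fatal(err)
//    }
//    fmt.Println(status) // reports whether serial console access is now enabled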
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation EnableSerialConsoleAccess for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableSerialConsoleAccess +func (c *EC2) EnableSerialConsoleAccess(input *EnableSerialConsoleAccessInput) (*EnableSerialConsoleAccessOutput, error) { + req, out := c.EnableSerialConsoleAccessRequest(input) + return out, req.Send() +} + +// EnableSerialConsoleAccessWithContext is the same as EnableSerialConsoleAccess with the addition of +// the ability to pass a context and additional request options. +// +// See EnableSerialConsoleAccess for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) EnableSerialConsoleAccessWithContext(ctx aws.Context, input *EnableSerialConsoleAccessInput, opts ...request.Option) (*EnableSerialConsoleAccessOutput, error) { + req, out := c.EnableSerialConsoleAccessRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opEnableTransitGatewayRouteTablePropagation = "EnableTransitGatewayRouteTablePropagation" // EnableTransitGatewayRouteTablePropagationRequest generates a "aws/request.Request" representing the @@ -29710,7 +30391,7 @@ func (c *EC2) ExportImageRequest(input *ExportImageInput) (req *request.Request, // ExportImage API operation for Amazon Elastic Compute Cloud. // // Exports an Amazon Machine Image (AMI) to a VM file. For more information, -// see Exporting a VM Directory from an Amazon Machine Image (AMI) (https://docs.aws.amazon.com/vm-import/latest/userguide/vmexport_image.html) +// see Exporting a VM directly from an Amazon Machine Image (AMI) (https://docs.aws.amazon.com/vm-import/latest/userguide/vmexport_image.html) // in the VM Import/Export User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -30586,6 +31267,93 @@ func (c *EC2) GetEbsEncryptionByDefaultWithContext(ctx aws.Context, input *GetEb return out, req.Send() } +const opGetFlowLogsIntegrationTemplate = "GetFlowLogsIntegrationTemplate" + +// GetFlowLogsIntegrationTemplateRequest generates a "aws/request.Request" representing the +// client's request for the GetFlowLogsIntegrationTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetFlowLogsIntegrationTemplate for more information on using the GetFlowLogsIntegrationTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetFlowLogsIntegrationTemplateRequest method. 
+// req, resp := client.GetFlowLogsIntegrationTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetFlowLogsIntegrationTemplate +func (c *EC2) GetFlowLogsIntegrationTemplateRequest(input *GetFlowLogsIntegrationTemplateInput) (req *request.Request, output *GetFlowLogsIntegrationTemplateOutput) { + op := &request.Operation{ + Name: opGetFlowLogsIntegrationTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetFlowLogsIntegrationTemplateInput{} + } + + output = &GetFlowLogsIntegrationTemplateOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetFlowLogsIntegrationTemplate API operation for Amazon Elastic Compute Cloud. +// +// Generates a CloudFormation template that streamlines and automates the integration +// of VPC flow logs with Amazon Athena. This make it easier for you to query +// and gain insights from VPC flow logs data. Based on the information that +// you provide, we configure resources in the template to do the following: +// +// * Create a table in Athena that maps fields to a custom log format +// +// * Create a Lambda function that updates the table with new partitions +// on a daily, weekly, or monthly basis +// +// * Create a table partitioned between two timestamps in the past +// +// * Create a set of named queries in Athena that you can use to get started +// quickly +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetFlowLogsIntegrationTemplate for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetFlowLogsIntegrationTemplate +func (c *EC2) GetFlowLogsIntegrationTemplate(input *GetFlowLogsIntegrationTemplateInput) (*GetFlowLogsIntegrationTemplateOutput, error) { + req, out := c.GetFlowLogsIntegrationTemplateRequest(input) + return out, req.Send() +} + +// GetFlowLogsIntegrationTemplateWithContext is the same as GetFlowLogsIntegrationTemplate with the addition of +// the ability to pass a context and additional request options. +// +// See GetFlowLogsIntegrationTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetFlowLogsIntegrationTemplateWithContext(ctx aws.Context, input *GetFlowLogsIntegrationTemplateInput, opts ...request.Option) (*GetFlowLogsIntegrationTemplateOutput, error) { + req, out := c.GetFlowLogsIntegrationTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
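// A small sketch of building the Athena integration payload consumed by this
// operation (the AthenaIntegration type is added later in this change; the S3
// ARN and the "daily" frequency value are illustrative placeholders):
//
//    ai := &ec2.AthenaIntegration{
//        IntegrationResultS3DestinationArn: aws.String("arn:aws:s3:::my-template-results-bucket"), // placeholder ARN
//        PartitionLoadFrequency:            aws.String("daily"),                                    // illustrative frequency
//    }
//    if err := ai.Validate(); err != nil {
//        log.Fatal(err) // both fields set above are required
//    }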
+ return out, req.Send() +} + const opGetGroupsForCapacityReservation = "GetGroupsForCapacityReservation" // GetGroupsForCapacityReservationRequest generates a "aws/request.Request" representing the @@ -31311,6 +32079,84 @@ func (c *EC2) GetReservedInstancesExchangeQuoteWithContext(ctx aws.Context, inpu return out, req.Send() } +const opGetSerialConsoleAccessStatus = "GetSerialConsoleAccessStatus" + +// GetSerialConsoleAccessStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetSerialConsoleAccessStatus operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetSerialConsoleAccessStatus for more information on using the GetSerialConsoleAccessStatus +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetSerialConsoleAccessStatusRequest method. +// req, resp := client.GetSerialConsoleAccessStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetSerialConsoleAccessStatus +func (c *EC2) GetSerialConsoleAccessStatusRequest(input *GetSerialConsoleAccessStatusInput) (req *request.Request, output *GetSerialConsoleAccessStatusOutput) { + op := &request.Operation{ + Name: opGetSerialConsoleAccessStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSerialConsoleAccessStatusInput{} + } + + output = &GetSerialConsoleAccessStatusOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetSerialConsoleAccessStatus API operation for Amazon Elastic Compute Cloud. +// +// Retrieves the access status of your account to the EC2 serial console of +// all instances. By default, access to the EC2 serial console is disabled for +// your account. For more information, see Manage account access to the EC2 +// serial console (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configure-access-to-serial-console.html#serial-console-account-access) +// in the Amazon EC2 User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetSerialConsoleAccessStatus for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetSerialConsoleAccessStatus +func (c *EC2) GetSerialConsoleAccessStatus(input *GetSerialConsoleAccessStatusInput) (*GetSerialConsoleAccessStatusOutput, error) { + req, out := c.GetSerialConsoleAccessStatusRequest(input) + return out, req.Send() +} + +// GetSerialConsoleAccessStatusWithContext is the same as GetSerialConsoleAccessStatus with the addition of +// the ability to pass a context and additional request options. +// +// See GetSerialConsoleAccessStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetSerialConsoleAccessStatusWithContext(ctx aws.Context, input *GetSerialConsoleAccessStatusInput, opts ...request.Option) (*GetSerialConsoleAccessStatusOutput, error) { + req, out := c.GetSerialConsoleAccessStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetTransitGatewayAttachmentPropagations = "GetTransitGatewayAttachmentPropagations" // GetTransitGatewayAttachmentPropagationsRequest generates a "aws/request.Request" representing the @@ -32099,8 +32945,10 @@ func (c *EC2) ImportImageRequest(input *ImportImageInput) (req *request.Request, // ImportImage API operation for Amazon Elastic Compute Cloud. // // Import single or multi-volume disk images or EBS snapshots into an Amazon -// Machine Image (AMI). For more information, see Importing a VM as an Image -// Using VM Import/Export (https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html) +// Machine Image (AMI). +// +// For more information, see Importing a VM as an image using VM Import/Export +// (https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html) // in the VM Import/Export User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -32176,9 +33024,14 @@ func (c *EC2) ImportInstanceRequest(input *ImportInstanceInput) (req *request.Re // ImportInstance API operation for Amazon Elastic Compute Cloud. // // Creates an import instance task using metadata from the specified disk image. -// ImportInstance only supports single-volume VMs. To import multi-volume VMs, -// use ImportImage. For more information, see Importing a Virtual Machine Using -// the Amazon EC2 CLI (https://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/ec2-cli-vmimport-export.html). +// +// This API action supports only single-volume VMs. To import multi-volume VMs, +// use ImportImage instead. +// +// This API action is not supported by the AWS Command Line Interface (AWS CLI). +// For information about using the Amazon EC2 CLI, which is deprecated, see +// Importing a VM to Amazon EC2 (https://awsdocs.s3.amazonaws.com/EC2/ec2-clt.pdf#UsingVirtualMachinesinAmazonEC2) +// in the Amazon EC2 CLI Reference PDF file. // // For information about the import manifest referenced by this API action, // see VM Import Manifest (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/manifest.html). @@ -32338,6 +33191,10 @@ func (c *EC2) ImportSnapshotRequest(input *ImportSnapshotInput) (req *request.Re // // Imports a disk into an EBS snapshot. // +// For more information, see Importing a disk as a snapshot using VM Import/Export +// (https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-import-snapshot.html) +// in the VM Import/Export User Guide. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -32410,8 +33267,16 @@ func (c *EC2) ImportVolumeRequest(input *ImportVolumeInput) (req *request.Reques // ImportVolume API operation for Amazon Elastic Compute Cloud. // -// Creates an import volume task using metadata from the specified disk image.For -// more information, see Importing Disks to Amazon EBS (https://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/importing-your-volumes-into-amazon-ebs.html). 
+// Creates an import volume task using metadata from the specified disk image. +// +// This API action supports only single-volume VMs. To import multi-volume VMs, +// use ImportImage instead. To import a disk to a snapshot, use ImportSnapshot +// instead. +// +// This API action is not supported by the AWS Command Line Interface (AWS CLI). +// For information about using the Amazon EC2 CLI, which is deprecated, see +// Importing Disks to Amazon EBS (https://awsdocs.s3.amazonaws.com/EC2/ec2-clt.pdf#importing-your-volumes-into-amazon-ebs) +// in the Amazon EC2 CLI Reference PDF file. // // For information about the import manifest referenced by this API action, // see VM Import Manifest (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/manifest.html). @@ -36732,12 +37597,25 @@ func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Requ // For Amazon EBS-backed instances, CreateImage creates and registers the AMI // in a single request, so you don't have to register the AMI yourself. // -// You can also use RegisterImage to create an Amazon EBS-backed Linux AMI from -// a snapshot of a root device volume. You specify the snapshot using the block -// device mapping. For more information, see Launching a Linux instance from -// a backup (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-launch-snapshot.html) +// If needed, you can deregister an AMI at any time. Any modifications you make +// to an AMI backed by an instance store volume invalidates its registration. +// If you make changes to an image, deregister the previous image and register +// the new image. +// +// Register a snapshot of a root device volume +// +// You can use RegisterImage to create an Amazon EBS-backed Linux AMI from a +// snapshot of a root device volume. You specify the snapshot using a block +// device mapping. You can't set the encryption state of the volume using the +// block device mapping. If the snapshot is encrypted, or encryption by default +// is enabled, the root volume of an instance launched from the AMI is encrypted. +// +// For more information, see Create a Linux AMI from a snapshot (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami-ebs.html#creating-launching-ami-from-snapshot) +// and Use encryption with EBS-backed AMIs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIEncryption.html) // in the Amazon Elastic Compute Cloud User Guide. // +// AWS Marketplace product codes +// // If any snapshots have AWS Marketplace product codes, they are copied to the // new AMI. // @@ -36763,11 +37641,6 @@ func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Requ // Obtaining billing information (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-billing-info.html) // in the Amazon Elastic Compute Cloud User Guide. // -// If needed, you can deregister an AMI at any time. Any modifications you make -// to an AMI backed by an instance store volume invalidates its registration. -// If you make changes to an image, deregister the previous image and register -// the new image. -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -42770,8 +43643,10 @@ func (s *ApplySecurityGroupsToClientVpnTargetNetworkOutput) SetSecurityGroupIds( type AssignIpv6AddressesInput struct { _ struct{} `type:"structure"` - // The number of IPv6 addresses to assign to the network interface. 
Amazon EC2 - // automatically selects the IPv6 addresses from the subnet range. You can't + // The number of additional IPv6 addresses to assign to the network interface. + // The specified number of IPv6 addresses are assigned in addition to the existing + // IPv6 addresses that are already assigned to the network interface. Amazon + // EC2 automatically selects the IPv6 addresses from the subnet range. You can't // use this option if specifying specific IPv6 addresses. Ipv6AddressCount *int64 `locationName:"ipv6AddressCount" type:"integer"` @@ -42829,7 +43704,8 @@ func (s *AssignIpv6AddressesInput) SetNetworkInterfaceId(v string) *AssignIpv6Ad type AssignIpv6AddressesOutput struct { _ struct{} `type:"structure"` - // The IPv6 addresses assigned to the network interface. + // The new IPv6 addresses assigned to the network interface. Existing IPv6 addresses + // that were assigned to the network interface before the request are not included. AssignedIpv6Addresses []*string `locationName:"assignedIpv6Addresses" locationNameList:"item" type:"list"` // The ID of the network interface. @@ -43007,10 +43883,10 @@ type AssociateAddressInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // The ID of the instance. This is required for EC2-Classic. For EC2-VPC, you - // can specify either the instance ID or the network interface ID, but not both. - // The operation fails if you specify an instance ID unless exactly one network - // interface is attached. + // The ID of the instance. The instance must have exactly one attached network + // interface. For EC2-VPC, you can specify either the instance ID or the network + // interface ID, but not both. For EC2-Classic, you must specify an instance + // ID and the instance must be in the running state. InstanceId *string `type:"string"` // [EC2-VPC] The ID of the network interface. If the instance has more than @@ -43025,8 +43901,8 @@ type AssociateAddressInput struct { // address is associated with the primary private IP address. PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` - // The Elastic IP address to associate with the instance. This is required for - // EC2-Classic. + // [EC2-Classic] The Elastic IP address to associate with the instance. This + // is required for EC2-Classic. PublicIp *string `type:"string"` } @@ -43359,7 +44235,7 @@ type AssociateEnclaveCertificateIamRoleOutput struct { CertificateS3BucketName *string `locationName:"certificateS3BucketName" type:"string"` // The Amazon S3 object key where the certificate, certificate chain, and encrypted - // private key bundle are stored. The object key is formatted as follows: certificate_arn/role_arn. + // private key bundle are stored. The object key is formatted as follows: role_arn/certificate_arn. CertificateS3ObjectKey *string `locationName:"certificateS3ObjectKey" type:"string"` // The ID of the AWS KMS CMK used to encrypt the private key of the certificate. @@ -43964,7 +44840,7 @@ type AssociatedRole struct { // The key of the Amazon S3 object ey where the certificate, certificate chain, // and encrypted private key bundle is stored. The object key is formated as - // follows: certificate_arn/role_arn. + // follows: role_arn/certificate_arn. CertificateS3ObjectKey *string `locationName:"certificateS3ObjectKey" type:"string"` // The ID of the KMS customer master key (CMK) used to encrypt the private key. 
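// A short sketch of the "additional addresses" semantics described in the hunk
// above: the requested count is assigned on top of any IPv6 addresses the
// interface already has, and only the new addresses are returned (svc is an
// *ec2.EC2 client as in the earlier sketches; the ENI ID is a placeholder):
//
//    out, err := svc.AssignIpv6Addresses(&ec2.AssignIpv6AddressesInput{
//        NetworkInterfaceId: aws.String("eni-0123456789abcdef0"), // placeholder ENI ID
//        Ipv6AddressCount:   aws.Int64(2),                        // assign two more addresses
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    // AssignedIpv6Addresses lists only the two newly assigned addresses; addresses
//    // that were already on the interface are not included.
//    fmt.Println(aws.StringValueSlice(out.AssignedIpv6Addresses))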
@@ -44072,6 +44948,77 @@ func (s *AssociationStatus) SetMessage(v string) *AssociationStatus { return s } +// Describes integration options for Amazon Athena. +type AthenaIntegration struct { + _ struct{} `type:"structure"` + + // The location in Amazon S3 to store the generated CloudFormation template. + // + // IntegrationResultS3DestinationArn is a required field + IntegrationResultS3DestinationArn *string `type:"string" required:"true"` + + // The end date for the partition. + PartitionEndDate *time.Time `type:"timestamp"` + + // The schedule for adding new partitions to the table. + // + // PartitionLoadFrequency is a required field + PartitionLoadFrequency *string `type:"string" required:"true" enum:"PartitionLoadFrequency"` + + // The start date for the partition. + PartitionStartDate *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s AthenaIntegration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AthenaIntegration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AthenaIntegration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AthenaIntegration"} + if s.IntegrationResultS3DestinationArn == nil { + invalidParams.Add(request.NewErrParamRequired("IntegrationResultS3DestinationArn")) + } + if s.PartitionLoadFrequency == nil { + invalidParams.Add(request.NewErrParamRequired("PartitionLoadFrequency")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIntegrationResultS3DestinationArn sets the IntegrationResultS3DestinationArn field's value. +func (s *AthenaIntegration) SetIntegrationResultS3DestinationArn(v string) *AthenaIntegration { + s.IntegrationResultS3DestinationArn = &v + return s +} + +// SetPartitionEndDate sets the PartitionEndDate field's value. +func (s *AthenaIntegration) SetPartitionEndDate(v time.Time) *AthenaIntegration { + s.PartitionEndDate = &v + return s +} + +// SetPartitionLoadFrequency sets the PartitionLoadFrequency field's value. +func (s *AthenaIntegration) SetPartitionLoadFrequency(v string) *AthenaIntegration { + s.PartitionLoadFrequency = &v + return s +} + +// SetPartitionStartDate sets the PartitionStartDate field's value. +func (s *AthenaIntegration) SetPartitionStartDate(v time.Time) *AthenaIntegration { + s.PartitionStartDate = &v + return s +} + type AttachClassicLinkVpcInput struct { _ struct{} `type:"structure"` @@ -48688,8 +49635,8 @@ type CopyImageInput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier you provide to ensure idempotency of the - // request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) - // in the Amazon Elastic Compute Cloud User Guide. + // request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html) + // in the Amazon EC2 API Reference. ClientToken *string `type:"string"` // A description for the new AMI in the destination Region. @@ -51191,7 +52138,7 @@ type CreateInstanceExportTaskInput struct { // maximum length is 255 characters. Description *string `locationName:"description" type:"string"` - // The format and location for an instance export task. + // The format and location for an export instance task. 
// // ExportToS3Task is a required field ExportToS3Task *ExportToS3TaskSpecification `locationName:"exportToS3" type:"structure" required:"true"` @@ -51201,7 +52148,7 @@ type CreateInstanceExportTaskInput struct { // InstanceId is a required field InstanceId *string `locationName:"instanceId" type:"string" required:"true"` - // The tags to apply to the instance export task during creation. + // The tags to apply to the export instance task during creation. TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` // The target virtualization environment. @@ -51272,7 +52219,7 @@ func (s *CreateInstanceExportTaskInput) SetTargetEnvironment(v string) *CreateIn type CreateInstanceExportTaskOutput struct { _ struct{} `type:"structure"` - // Information about the instance export task. + // Information about the export instance task. ExportTask *ExportTask `locationName:"exportTask" type:"structure"` } @@ -52989,6 +53936,111 @@ func (s *CreatePlacementGroupOutput) SetPlacementGroup(v *PlacementGroup) *Creat return s } +type CreateReplaceRootVolumeTaskInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. If you do not specify a client token, a randomly generated token + // is used for the request to ensure idempotency. For more information, see + // Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string" idempotencyToken:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the instance for which to replace the root volume. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` + + // The ID of the snapshot from which to restore the replacement root volume. + // If you want to restore the volume to the initial launch state, omit this + // parameter. + SnapshotId *string `type:"string"` + + // The tags to apply to the root volume replacement task. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateReplaceRootVolumeTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReplaceRootVolumeTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateReplaceRootVolumeTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateReplaceRootVolumeTaskInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateReplaceRootVolumeTaskInput) SetClientToken(v string) *CreateReplaceRootVolumeTaskInput { + s.ClientToken = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateReplaceRootVolumeTaskInput) SetDryRun(v bool) *CreateReplaceRootVolumeTaskInput { + s.DryRun = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. 
+func (s *CreateReplaceRootVolumeTaskInput) SetInstanceId(v string) *CreateReplaceRootVolumeTaskInput { + s.InstanceId = &v + return s +} + +// SetSnapshotId sets the SnapshotId field's value. +func (s *CreateReplaceRootVolumeTaskInput) SetSnapshotId(v string) *CreateReplaceRootVolumeTaskInput { + s.SnapshotId = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateReplaceRootVolumeTaskInput) SetTagSpecifications(v []*TagSpecification) *CreateReplaceRootVolumeTaskInput { + s.TagSpecifications = v + return s +} + +type CreateReplaceRootVolumeTaskOutput struct { + _ struct{} `type:"structure"` + + // Information about the root volume replacement task. + ReplaceRootVolumeTask *ReplaceRootVolumeTask `locationName:"replaceRootVolumeTask" type:"structure"` +} + +// String returns the string representation +func (s CreateReplaceRootVolumeTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReplaceRootVolumeTaskOutput) GoString() string { + return s.String() +} + +// SetReplaceRootVolumeTask sets the ReplaceRootVolumeTask field's value. +func (s *CreateReplaceRootVolumeTaskOutput) SetReplaceRootVolumeTask(v *ReplaceRootVolumeTask) *CreateReplaceRootVolumeTaskOutput { + s.ReplaceRootVolumeTask = v + return s +} + // Contains the parameters for CreateReservedInstancesListing. type CreateReservedInstancesListingInput struct { _ struct{} `type:"structure"` @@ -53100,6 +54152,119 @@ func (s *CreateReservedInstancesListingOutput) SetReservedInstancesListings(v [] return s } +type CreateRestoreImageTaskInput struct { + _ struct{} `type:"structure"` + + // The name of the S3 bucket that contains the stored AMI object. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The name for the restored AMI. The name must be unique for AMIs in the Region + // for this account. If you do not provide a name, the new AMI gets the same + // name as the original AMI. + Name *string `type:"string"` + + // The name of the stored AMI object in the bucket. + // + // ObjectKey is a required field + ObjectKey *string `type:"string" required:"true"` + + // The tags to apply to the AMI and snapshots on restoration. You can tag the + // AMI, the snapshots, or both. + // + // * To tag the AMI, the value for ResourceType must be image. + // + // * To tag the snapshots, the value for ResourceType must be snapshot. The + // same tag is applied to all of the snapshots that are created. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateRestoreImageTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRestoreImageTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
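// A companion sketch to the store-image example earlier: restoring an AMI from
// the stored object, assuming the CreateRestoreImageTask operation that pairs
// with this input type (bucket, object key, and name are placeholders):
//
//    restored, err := svc.CreateRestoreImageTask(&ec2.CreateRestoreImageTaskInput{
//        Bucket:    aws.String("my-ami-store-bucket"),       // placeholder bucket
//        ObjectKey: aws.String("ami-0123456789abcdef0.bin"), // placeholder stored-AMI object key
//        Name:      aws.String("restored-ami"),              // optional name for the new AMI
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    fmt.Println(aws.StringValue(restored.ImageId)) // ID of the restored AMI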
+func (s *CreateRestoreImageTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateRestoreImageTaskInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.ObjectKey == nil { + invalidParams.Add(request.NewErrParamRequired("ObjectKey")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *CreateRestoreImageTaskInput) SetBucket(v string) *CreateRestoreImageTaskInput { + s.Bucket = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateRestoreImageTaskInput) SetDryRun(v bool) *CreateRestoreImageTaskInput { + s.DryRun = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateRestoreImageTaskInput) SetName(v string) *CreateRestoreImageTaskInput { + s.Name = &v + return s +} + +// SetObjectKey sets the ObjectKey field's value. +func (s *CreateRestoreImageTaskInput) SetObjectKey(v string) *CreateRestoreImageTaskInput { + s.ObjectKey = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateRestoreImageTaskInput) SetTagSpecifications(v []*TagSpecification) *CreateRestoreImageTaskInput { + s.TagSpecifications = v + return s +} + +type CreateRestoreImageTaskOutput struct { + _ struct{} `type:"structure"` + + // The AMI ID. + ImageId *string `locationName:"imageId" type:"string"` +} + +// String returns the string representation +func (s CreateRestoreImageTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRestoreImageTaskOutput) GoString() string { + return s.String() +} + +// SetImageId sets the ImageId field's value. +func (s *CreateRestoreImageTaskOutput) SetImageId(v string) *CreateRestoreImageTaskOutput { + s.ImageId = &v + return s +} + type CreateRouteInput struct { _ struct{} `type:"structure"` @@ -53812,6 +54977,104 @@ func (s *CreateSpotDatafeedSubscriptionOutput) SetSpotDatafeedSubscription(v *Sp return s } +type CreateStoreImageTaskInput struct { + _ struct{} `type:"structure"` + + // The name of the S3 bucket in which the AMI object will be stored. The bucket + // must be in the Region in which the request is being made. The AMI object + // appears in the bucket only after the upload task has completed. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the AMI. + // + // ImageId is a required field + ImageId *string `type:"string" required:"true"` + + // The tags to apply to the AMI object that will be stored in the S3 bucket. + S3ObjectTags []*S3ObjectTag `locationName:"S3ObjectTag" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateStoreImageTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStoreImageTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateStoreImageTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateStoreImageTaskInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *CreateStoreImageTaskInput) SetBucket(v string) *CreateStoreImageTaskInput { + s.Bucket = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateStoreImageTaskInput) SetDryRun(v bool) *CreateStoreImageTaskInput { + s.DryRun = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *CreateStoreImageTaskInput) SetImageId(v string) *CreateStoreImageTaskInput { + s.ImageId = &v + return s +} + +// SetS3ObjectTags sets the S3ObjectTags field's value. +func (s *CreateStoreImageTaskInput) SetS3ObjectTags(v []*S3ObjectTag) *CreateStoreImageTaskInput { + s.S3ObjectTags = v + return s +} + +type CreateStoreImageTaskOutput struct { + _ struct{} `type:"structure"` + + // The name of the stored AMI object in the S3 bucket. + ObjectKey *string `locationName:"objectKey" type:"string"` +} + +// String returns the string representation +func (s CreateStoreImageTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStoreImageTaskOutput) GoString() string { + return s.String() +} + +// SetObjectKey sets the ObjectKey field's value. +func (s *CreateStoreImageTaskOutput) SetObjectKey(v string) *CreateStoreImageTaskOutput { + s.ObjectKey = &v + return s +} + type CreateSubnetInput struct { _ struct{} `type:"structure"` @@ -56142,9 +57405,7 @@ type CreateVpcEndpointInput struct { // true: enableDnsHostnames and enableDnsSupport. Use ModifyVpcAttribute to // set the VPC attributes. // - // Private DNS is not supported for Amazon S3 interface endpoints. - // - // Default: true for supported endpoints + // Default: true PrivateDnsEnabled *bool `type:"boolean"` // (Gateway endpoint) One or more route table IDs. @@ -62408,6 +63669,9 @@ type DescribeCapacityReservationsInput struct { // to invalid request parameters, capacity constraints, or instance limit // constraints. Failed requests are retained for 60 minutes. // + // * start-date - The date and time at which the Capacity Reservation was + // started. + // // * end-date - The date and time at which the Capacity Reservation expires. // When a Capacity Reservation expires, the reserved capacity is released // and you can no longer launch instances into it. The Capacity Reservation's @@ -65865,9 +67129,9 @@ type DescribeImageAttributeInput struct { // The AMI attribute. // - // Note: Depending on your account privileges, the blockDeviceMapping attribute - // may return a Client.AuthFailure error. If this happens, use DescribeImages - // to get information about the block device mapping for the AMI. + // Note: The blockDeviceMapping attribute is deprecated. Using this attribute + // returns the Client.AuthFailure error. To get information about the block + // device mappings for an AMI, use the DescribeImages action. // // Attribute is a required field Attribute *string `type:"string" required:"true" enum:"ImageAttributeName"` @@ -65935,6 +67199,9 @@ type DescribeImageAttributeOutput struct { // The block device mapping entries. 
BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + // Describes a value for a resource attribute that is a String. + BootMode *AttributeValue `locationName:"bootMode" type:"structure"` + // A description for the AMI. Description *AttributeValue `locationName:"description" type:"structure"` @@ -65974,6 +67241,12 @@ func (s *DescribeImageAttributeOutput) SetBlockDeviceMappings(v []*BlockDeviceMa return s } +// SetBootMode sets the BootMode field's value. +func (s *DescribeImageAttributeOutput) SetBootMode(v *AttributeValue) *DescribeImageAttributeOutput { + s.BootMode = v + return s +} + // SetDescription sets the Description field's value. func (s *DescribeImageAttributeOutput) SetDescription(v *AttributeValue) *DescribeImageAttributeOutput { s.Description = v @@ -66494,9 +67767,12 @@ type DescribeInstanceAttributeOutput struct { // The device name of the root device volume (for example, /dev/sda1). RootDeviceName *AttributeValue `locationName:"rootDeviceName" type:"structure"` - // Indicates whether source/destination checking is enabled. A value of true - // means that checking is enabled, and false means that checking is disabled. - // This value must be false for a NAT instance to perform NAT. + // Enable or disable source/destination checks, which ensure that the instance + // is either the source or the destination of any traffic that it receives. + // If the value is true, source/destination checks are enabled; otherwise, they + // are disabled. The default value is true. You must disable source/destination + // checks if the instance runs services such as network address translation, + // routing, or firewalls. SourceDestCheck *AttributeBooleanValue `locationName:"sourceDestCheck" type:"structure"` // Indicates whether enhanced networking with the Intel 82599 Virtual Function @@ -67129,6 +68405,9 @@ type DescribeInstanceTypesInput struct { // // * memory-info.size-in-mib - The memory size. // + // * network-info.efa-info.maximum-efa-interfaces - The maximum number of + // Elastic Fabric Adapters (EFAs) per instance. + // // * network-info.efa-supported - Indicates whether the instance type supports // Elastic Fabric Adapter (EFA) (true | false). // @@ -67156,6 +68435,8 @@ type DescribeInstanceTypesInput struct { // * processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in // GHz. // + // * supported-boot-mode - The boot mode (legacy-bios | uefi). + // // * supported-root-device-type - The root device type (ebs | instance-store). // // * supported-usage-class - The usage class (on-demand | spot). @@ -67465,6 +68746,8 @@ type DescribeInstancesInput struct { // // * network-interface.vpc-id - The ID of the VPC for the network interface. // + // * outpost-arn - The Amazon Resource Name (ARN) of the Outpost. + // // * owner-id - The AWS account ID of the instance owner. // // * placement-group-name - The name of the placement group for the instance. @@ -70078,8 +71361,8 @@ type DescribeNetworkInterfacesInput struct { // // * private-dns-name - The private DNS name of the network interface (IPv4). // - // * requester-id - The ID of the entity that launched the instance on your - // behalf (for example, AWS Management Console, Auto Scaling, and so on). + // * requester-id - The alias or AWS account ID of the principal or service + // that created the network interface. 
// // * requester-managed - Indicates whether the network interface is being // managed by an AWS service (for example, AWS Management Console, Auto Scaling, @@ -70705,6 +71988,118 @@ func (s *DescribeRegionsOutput) SetRegions(v []*Region) *DescribeRegionsOutput { return s } +type DescribeReplaceRootVolumeTasksInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // Filter to use: + // + // * instance-id - The ID of the instance for which the root volume replacement + // task was created. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the root volume replacement task to view. + ReplaceRootVolumeTaskIds []*string `locationName:"ReplaceRootVolumeTaskId" locationNameList:"ReplaceRootVolumeTaskId" type:"list"` +} + +// String returns the string representation +func (s DescribeReplaceRootVolumeTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReplaceRootVolumeTasksInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeReplaceRootVolumeTasksInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeReplaceRootVolumeTasksInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeReplaceRootVolumeTasksInput) SetDryRun(v bool) *DescribeReplaceRootVolumeTasksInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeReplaceRootVolumeTasksInput) SetFilters(v []*Filter) *DescribeReplaceRootVolumeTasksInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeReplaceRootVolumeTasksInput) SetMaxResults(v int64) *DescribeReplaceRootVolumeTasksInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeReplaceRootVolumeTasksInput) SetNextToken(v string) *DescribeReplaceRootVolumeTasksInput { + s.NextToken = &v + return s +} + +// SetReplaceRootVolumeTaskIds sets the ReplaceRootVolumeTaskIds field's value. +func (s *DescribeReplaceRootVolumeTasksInput) SetReplaceRootVolumeTaskIds(v []*string) *DescribeReplaceRootVolumeTasksInput { + s.ReplaceRootVolumeTaskIds = v + return s +} + +type DescribeReplaceRootVolumeTasksOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the root volume replacement task. 
+ ReplaceRootVolumeTasks []*ReplaceRootVolumeTask `locationName:"replaceRootVolumeTaskSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeReplaceRootVolumeTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReplaceRootVolumeTasksOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeReplaceRootVolumeTasksOutput) SetNextToken(v string) *DescribeReplaceRootVolumeTasksOutput { + s.NextToken = &v + return s +} + +// SetReplaceRootVolumeTasks sets the ReplaceRootVolumeTasks field's value. +func (s *DescribeReplaceRootVolumeTasksOutput) SetReplaceRootVolumeTasks(v []*ReplaceRootVolumeTask) *DescribeReplaceRootVolumeTasksOutput { + s.ReplaceRootVolumeTasks = v + return s +} + // Contains the parameters for DescribeReservedInstances. type DescribeReservedInstancesInput struct { _ struct{} `type:"structure"` @@ -71824,7 +73219,7 @@ type DescribeSecurityGroupsInput struct { // been referenced in an outbound security group rule. // // * egress.ip-permission.group-name - The name of a security group that - // has been referenced in an outbound security group rule. + // is referenced in an outbound security group rule. // // * egress.ip-permission.ipv6-cidr - An IPv6 CIDR block for an outbound // security group rule. @@ -71833,7 +73228,7 @@ type DescribeSecurityGroupsInput struct { // a security group rule allows outbound access. // // * egress.ip-permission.protocol - The IP protocol for an outbound security - // group rule (tcp | udp | icmp or a protocol number). + // group rule (tcp | udp | icmp, a protocol number, or -1 for all protocols). // // * egress.ip-permission.to-port - For an outbound rule, the end of port // range for the TCP and UDP protocols, or an ICMP code. @@ -71854,8 +73249,8 @@ type DescribeSecurityGroupsInput struct { // * ip-permission.group-id - The ID of a security group that has been referenced // in an inbound security group rule. // - // * ip-permission.group-name - The name of a security group that has been - // referenced in an inbound security group rule. + // * ip-permission.group-name - The name of a security group that is referenced + // in an inbound security group rule. // // * ip-permission.ipv6-cidr - An IPv6 CIDR block for an inbound security // group rule. @@ -71864,7 +73259,7 @@ type DescribeSecurityGroupsInput struct { // security group rule allows inbound access. // // * ip-permission.protocol - The IP protocol for an inbound security group - // rule (tcp | udp | icmp or a protocol number). + // rule (tcp | udp | icmp, a protocol number, or -1 for all protocols). // // * ip-permission.to-port - For an inbound rule, the end of port range for // the TCP and UDP protocols, or an ICMP code. @@ -73162,6 +74557,124 @@ func (s *DescribeStaleSecurityGroupsOutput) SetStaleSecurityGroupSet(v []*StaleS return s } +type DescribeStoreImageTasksInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The filters. 
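A hedged sketch of the DescribeReplaceRootVolumeTasks types added above, using the documented instance-id filter; the client operation name is assumed to exist in this SDK version and the instance ID is a placeholder.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// instance-id is the only filter documented for this call.
	input := &ec2.DescribeReplaceRootVolumeTasksInput{
		Filters: []*ec2.Filter{{
			Name:   aws.String("instance-id"),
			Values: []*string{aws.String("i-0123456789abcdef0")}, // placeholder
		}},
		MaxResults: aws.Int64(10),
	}

	// Assumed to exist alongside the types added in this revision.
	out, err := svc.DescribeReplaceRootVolumeTasks(input)
	if err != nil {
		fmt.Println("DescribeReplaceRootVolumeTasks failed:", err)
		return
	}
	for _, task := range out.ReplaceRootVolumeTasks {
		fmt.Printf("%s: %s\n",
			aws.StringValue(task.ReplaceRootVolumeTaskId),
			aws.StringValue(task.TaskState))
	}
}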
+ // + // * task-state - Returns tasks in a certain state (InProgress | Completed + // | Failed) + // + // * bucket - Returns task information for tasks that targeted a specific + // bucket. For the filter value, specify the bucket name. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The AMI IDs for which to show progress. Up to 20 AMI IDs can be included + // in a request. + ImageIds []*string `locationName:"ImageId" locationNameList:"item" type:"list"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. This + // value can be between 1 and 200. You cannot specify this parameter and the + // ImageIDs parameter in the same call. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeStoreImageTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStoreImageTasksInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeStoreImageTasksInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeStoreImageTasksInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeStoreImageTasksInput) SetDryRun(v bool) *DescribeStoreImageTasksInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeStoreImageTasksInput) SetFilters(v []*Filter) *DescribeStoreImageTasksInput { + s.Filters = v + return s +} + +// SetImageIds sets the ImageIds field's value. +func (s *DescribeStoreImageTasksInput) SetImageIds(v []*string) *DescribeStoreImageTasksInput { + s.ImageIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeStoreImageTasksInput) SetMaxResults(v int64) *DescribeStoreImageTasksInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeStoreImageTasksInput) SetNextToken(v string) *DescribeStoreImageTasksInput { + s.NextToken = &v + return s +} + +type DescribeStoreImageTasksOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The information about the AMI store tasks. + StoreImageTaskResults []*StoreImageTaskResult `locationName:"storeImageTaskResultSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeStoreImageTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStoreImageTasksOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeStoreImageTasksOutput) SetNextToken(v string) *DescribeStoreImageTasksOutput { + s.NextToken = &v + return s +} + +// SetStoreImageTaskResults sets the StoreImageTaskResults field's value. 
+func (s *DescribeStoreImageTasksOutput) SetStoreImageTaskResults(v []*StoreImageTaskResult) *DescribeStoreImageTasksOutput { + s.StoreImageTaskResults = v + return s +} + type DescribeSubnetsInput struct { _ struct{} `type:"structure"` @@ -73198,6 +74711,8 @@ type DescribeSubnetsInput struct { // * ipv6-cidr-block-association.state - The state of an IPv6 CIDR block // associated with the subnet. // + // * outpost-arn - The Amazon Resource Name (ARN) of the Outpost. + // // * owner-id - The ID of the AWS account that owns the subnet. // // * state - The state of the subnet (pending | available). @@ -76045,6 +77560,8 @@ type DescribeVpcEndpointServicesInput struct { // // * service-name - The name of the service. // + // * service-type - The type of service (Interface | Gateway). + // // * tag: - The key/value combination of a tag assigned to the resource. // Use the tag key in the filter name and the tag value as the filter value. // For example, to find all resources that have a tag with the key Owner @@ -77708,6 +79225,57 @@ func (s *DisableFastSnapshotRestoresOutput) SetUnsuccessful(v []*DisableFastSnap return s } +type DisableSerialConsoleAccessInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s DisableSerialConsoleAccessInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableSerialConsoleAccessInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DisableSerialConsoleAccessInput) SetDryRun(v bool) *DisableSerialConsoleAccessInput { + s.DryRun = &v + return s +} + +type DisableSerialConsoleAccessOutput struct { + _ struct{} `type:"structure"` + + // If true, access to the EC2 serial console of all instances is enabled for + // your account. If false, access to the EC2 serial console of all instances + // is disabled for your account. + SerialConsoleAccessEnabled *bool `locationName:"serialConsoleAccessEnabled" type:"boolean"` +} + +// String returns the string representation +func (s DisableSerialConsoleAccessOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableSerialConsoleAccessOutput) GoString() string { + return s.String() +} + +// SetSerialConsoleAccessEnabled sets the SerialConsoleAccessEnabled field's value. +func (s *DisableSerialConsoleAccessOutput) SetSerialConsoleAccessEnabled(v bool) *DisableSerialConsoleAccessOutput { + s.SerialConsoleAccessEnabled = &v + return s +} + type DisableTransitGatewayRouteTablePropagationInput struct { _ struct{} `type:"structure"` @@ -79374,6 +80942,30 @@ func (s *EbsOptimizedInfo) SetMaximumThroughputInMBps(v float64) *EbsOptimizedIn return s } +// Describes the Elastic Fabric Adapters for the instance type. +type EfaInfo struct { + _ struct{} `type:"structure"` + + // The maximum number of Elastic Fabric Adapters for the instance type. 
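A minimal sketch of checking store-task progress with the DescribeStoreImageTasks types added above; the operation name is assumed to be available in this SDK version and the AMI ID is a placeholder.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// Ask for progress on a single AMI; MaxResults cannot be combined with ImageIds.
	input := &ec2.DescribeStoreImageTasksInput{
		ImageIds: []*string{aws.String("ami-0123456789abcdef0")}, // placeholder
	}

	// Assumed to exist alongside the types added in this revision.
	out, err := svc.DescribeStoreImageTasks(input)
	if err != nil {
		fmt.Println("DescribeStoreImageTasks failed:", err)
		return
	}
	for _, r := range out.StoreImageTaskResults {
		fmt.Printf("%s -> s3://%s/%s: %s (%d%%)\n",
			aws.StringValue(r.AmiId),
			aws.StringValue(r.Bucket),
			aws.StringValue(r.S3objectKey),
			aws.StringValue(r.StoreTaskState),
			aws.Int64Value(r.ProgressPercentage))
	}
}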
+ MaximumEfaInterfaces *int64 `locationName:"maximumEfaInterfaces" type:"integer"` +} + +// String returns the string representation +func (s EfaInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EfaInfo) GoString() string { + return s.String() +} + +// SetMaximumEfaInterfaces sets the MaximumEfaInterfaces field's value. +func (s *EfaInfo) SetMaximumEfaInterfaces(v int64) *EfaInfo { + s.MaximumEfaInterfaces = &v + return s +} + // Describes an egress-only internet gateway. type EgressOnlyInternetGateway struct { _ struct{} `type:"structure"` @@ -80112,6 +81704,57 @@ func (s *EnableFastSnapshotRestoresOutput) SetUnsuccessful(v []*EnableFastSnapsh return s } +type EnableSerialConsoleAccessInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s EnableSerialConsoleAccessInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableSerialConsoleAccessInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *EnableSerialConsoleAccessInput) SetDryRun(v bool) *EnableSerialConsoleAccessInput { + s.DryRun = &v + return s +} + +type EnableSerialConsoleAccessOutput struct { + _ struct{} `type:"structure"` + + // If true, access to the EC2 serial console of all instances is enabled for + // your account. If false, access to the EC2 serial console of all instances + // is disabled for your account. + SerialConsoleAccessEnabled *bool `locationName:"serialConsoleAccessEnabled" type:"boolean"` +} + +// String returns the string representation +func (s EnableSerialConsoleAccessOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableSerialConsoleAccessOutput) GoString() string { + return s.String() +} + +// SetSerialConsoleAccessEnabled sets the SerialConsoleAccessEnabled field's value. +func (s *EnableSerialConsoleAccessOutput) SetSerialConsoleAccessEnabled(v bool) *EnableSerialConsoleAccessOutput { + s.SerialConsoleAccessEnabled = &v + return s +} + type EnableTransitGatewayRouteTablePropagationInput struct { _ struct{} `type:"structure"` @@ -81236,7 +82879,7 @@ type ExportImageInput struct { // S3ExportLocation is a required field S3ExportLocation *ExportTaskS3LocationRequest `type:"structure" required:"true"` - // The tags to apply to the image being exported. + // The tags to apply to the export image task during creation. TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } @@ -81354,7 +82997,7 @@ type ExportImageOutput struct { // The status message for the export image task. StatusMessage *string `locationName:"statusMessage" type:"string"` - // Any tags assigned to the image being exported. + // Any tags assigned to the export image task. Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } @@ -81454,7 +83097,7 @@ type ExportImageTask struct { // The status message for the export image task. StatusMessage *string `locationName:"statusMessage" type:"string"` - // Any tags assigned to the image being exported. 
+ // Any tags assigned to the export image task. Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } @@ -81516,7 +83159,7 @@ func (s *ExportImageTask) SetTags(v []*Tag) *ExportImageTask { return s } -// Describes an instance export task. +// Describes an export instance task. type ExportTask struct { _ struct{} `type:"structure"` @@ -81675,7 +83318,7 @@ func (s *ExportTaskS3LocationRequest) SetS3Prefix(v string) *ExportTaskS3Locatio return s } -// Describes the format and location for an instance export task. +// Describes the format and location for the export task. type ExportToS3Task struct { _ struct{} `type:"structure"` @@ -81728,7 +83371,7 @@ func (s *ExportToS3Task) SetS3Key(v string) *ExportToS3Task { return s } -// Describes an instance export task. +// Describes an export instance task. type ExportToS3TaskSpecification struct { _ struct{} `type:"structure"` @@ -82005,29 +83648,7 @@ func (s *FederatedAuthenticationRequest) SetSelfServiceSAMLProviderArn(v string) // A filter name and value pair that is used to return a more specific list // of results from a describe operation. Filters can be used to match a set -// of resources by specific criteria, such as tags, attributes, or IDs. The -// filters supported by a describe operation are documented with the describe -// operation. For example: -// -// * DescribeAvailabilityZones -// -// * DescribeImages -// -// * DescribeInstances -// -// * DescribeKeyPairs -// -// * DescribeSecurityGroups -// -// * DescribeSnapshots -// -// * DescribeSubnets -// -// * DescribeTags -// -// * DescribeVolumes -// -// * DescribeVpcs +// of resources by specific criteria, such as tags, attributes, or IDs. type Filter struct { _ struct{} `type:"structure"` @@ -82329,6 +83950,9 @@ type FleetLaunchTemplateConfigRequest struct { // Any parameters that you specify override the same parameters in the launch // template. + // + // For fleets of type request and maintain, a maximum of 300 items is allowed + // across all launch templates. Overrides []*FleetLaunchTemplateOverridesRequest `locationNameList:"item" type:"list"` } @@ -82385,12 +84009,21 @@ type FleetLaunchTemplateOverrides struct { // The location where the instance launched, if applicable. Placement *PlacementResponse `locationName:"placement" type:"structure"` - // The priority for the launch template override. If AllocationStrategy is set - // to prioritized, EC2 Fleet uses priority to determine which launch template - // override to use first in fulfilling On-Demand capacity. The highest priority - // is launched first. Valid values are whole numbers starting at 0. The lower - // the number, the higher the priority. If no number is set, the override has - // the lowest priority. + // The priority for the launch template override. The highest priority is launched + // first. + // + // If the On-Demand AllocationStrategy is set to prioritized, EC2 Fleet uses + // priority to determine which launch template override to use first in fulfilling + // On-Demand capacity. + // + // If the Spot AllocationStrategy is set to capacity-optimized-prioritized, + // EC2 Fleet uses priority on a best-effort basis to determine which launch + // template override to use in fulfilling Spot capacity, but optimizes for capacity + // first. + // + // Valid values are whole numbers starting at 0. The lower the number, the higher + // the priority. If no number is set, the override has the lowest priority. + // You can set the same priority for different launch template overrides. 
Priority *float64 `locationName:"priority" type:"double"` // The ID of the subnet in which to launch the instances. @@ -82468,12 +84101,21 @@ type FleetLaunchTemplateOverridesRequest struct { // The location where the instance launched, if applicable. Placement *Placement `type:"structure"` - // The priority for the launch template override. If AllocationStrategy is set - // to prioritized, EC2 Fleet uses priority to determine which launch template - // override to use first in fulfilling On-Demand capacity. The highest priority - // is launched first. Valid values are whole numbers starting at 0. The lower - // the number, the higher the priority. If no number is set, the launch template - // override has the lowest priority. + // The priority for the launch template override. The highest priority is launched + // first. + // + // If the On-Demand AllocationStrategy is set to prioritized, EC2 Fleet uses + // priority to determine which launch template override to use first in fulfilling + // On-Demand capacity. + // + // If the Spot AllocationStrategy is set to capacity-optimized-prioritized, + // EC2 Fleet uses priority on a best-effort basis to determine which launch + // template override to use in fulfilling Spot capacity, but optimizes for capacity + // first. + // + // Valid values are whole numbers starting at 0. The lower the number, the higher + // the priority. If no number is set, the launch template override has the lowest + // priority. You can set the same priority for different launch template overrides. Priority *float64 `type:"double"` // The IDs of the subnets in which to launch the instances. Separate multiple @@ -84156,6 +85798,113 @@ func (s *GetEbsEncryptionByDefaultOutput) SetEbsEncryptionByDefault(v bool) *Get return s } +type GetFlowLogsIntegrationTemplateInput struct { + _ struct{} `type:"structure"` + + // To store the CloudFormation template in Amazon S3, specify the location in + // Amazon S3. + // + // ConfigDeliveryS3DestinationArn is a required field + ConfigDeliveryS3DestinationArn *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the flow log. + // + // FlowLogId is a required field + FlowLogId *string `type:"string" required:"true"` + + // Information about the service integration. + // + // IntegrateServices is a required field + IntegrateServices *IntegrateServices `locationName:"IntegrateService" type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetFlowLogsIntegrationTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFlowLogsIntegrationTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetFlowLogsIntegrationTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetFlowLogsIntegrationTemplateInput"} + if s.ConfigDeliveryS3DestinationArn == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigDeliveryS3DestinationArn")) + } + if s.FlowLogId == nil { + invalidParams.Add(request.NewErrParamRequired("FlowLogId")) + } + if s.IntegrateServices == nil { + invalidParams.Add(request.NewErrParamRequired("IntegrateServices")) + } + if s.IntegrateServices != nil { + if err := s.IntegrateServices.Validate(); err != nil { + invalidParams.AddNested("IntegrateServices", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfigDeliveryS3DestinationArn sets the ConfigDeliveryS3DestinationArn field's value. +func (s *GetFlowLogsIntegrationTemplateInput) SetConfigDeliveryS3DestinationArn(v string) *GetFlowLogsIntegrationTemplateInput { + s.ConfigDeliveryS3DestinationArn = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *GetFlowLogsIntegrationTemplateInput) SetDryRun(v bool) *GetFlowLogsIntegrationTemplateInput { + s.DryRun = &v + return s +} + +// SetFlowLogId sets the FlowLogId field's value. +func (s *GetFlowLogsIntegrationTemplateInput) SetFlowLogId(v string) *GetFlowLogsIntegrationTemplateInput { + s.FlowLogId = &v + return s +} + +// SetIntegrateServices sets the IntegrateServices field's value. +func (s *GetFlowLogsIntegrationTemplateInput) SetIntegrateServices(v *IntegrateServices) *GetFlowLogsIntegrationTemplateInput { + s.IntegrateServices = v + return s +} + +type GetFlowLogsIntegrationTemplateOutput struct { + _ struct{} `type:"structure"` + + // The generated CloudFormation template. + Result *string `locationName:"result" type:"string"` +} + +// String returns the string representation +func (s GetFlowLogsIntegrationTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFlowLogsIntegrationTemplateOutput) GoString() string { + return s.String() +} + +// SetResult sets the Result field's value. +func (s *GetFlowLogsIntegrationTemplateOutput) SetResult(v string) *GetFlowLogsIntegrationTemplateOutput { + s.Result = &v + return s +} + type GetGroupsForCapacityReservationInput struct { _ struct{} `type:"structure"` @@ -84920,6 +86669,57 @@ func (s *GetReservedInstancesExchangeQuoteOutput) SetValidationFailureReason(v s return s } +type GetSerialConsoleAccessStatusInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s GetSerialConsoleAccessStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSerialConsoleAccessStatusInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *GetSerialConsoleAccessStatusInput) SetDryRun(v bool) *GetSerialConsoleAccessStatusInput { + s.DryRun = &v + return s +} + +type GetSerialConsoleAccessStatusOutput struct { + _ struct{} `type:"structure"` + + // If true, access to the EC2 serial console of all instances is enabled for + // your account. 
If false, access to the EC2 serial console of all instances + // is disabled for your account. + SerialConsoleAccessEnabled *bool `locationName:"serialConsoleAccessEnabled" type:"boolean"` +} + +// String returns the string representation +func (s GetSerialConsoleAccessStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSerialConsoleAccessStatusOutput) GoString() string { + return s.String() +} + +// SetSerialConsoleAccessEnabled sets the SerialConsoleAccessEnabled field's value. +func (s *GetSerialConsoleAccessStatusOutput) SetSerialConsoleAccessEnabled(v bool) *GetSerialConsoleAccessStatusOutput { + s.SerialConsoleAccessEnabled = &v + return s +} + type GetTransitGatewayAttachmentPropagationsInput struct { _ struct{} `type:"structure"` @@ -86585,6 +88385,10 @@ type Image struct { // Any block device mapping entries. BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + // The boot mode of the image. For more information, see Boot modes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html) + // in the Amazon Elastic Compute Cloud User Guide. + BootMode *string `locationName:"bootMode" type:"string" enum:"BootModeValues"` + // The date and time the image was created. CreationDate *string `locationName:"creationDate" type:"string"` @@ -86695,6 +88499,12 @@ func (s *Image) SetBlockDeviceMappings(v []*BlockDeviceMapping) *Image { return s } +// SetBootMode sets the BootMode field's value. +func (s *Image) SetBootMode(v string) *Image { + s.BootMode = &v + return s +} + // SetCreationDate sets the CreationDate field's value. func (s *Image) SetCreationDate(v string) *Image { s.CreationDate = &v @@ -86851,7 +88661,7 @@ type ImageDiskContainer struct { // The format of the disk image being imported. // - // Valid values: OVA | VHD | VHDX |VMDK + // Valid values: OVA | VHD | VHDX | VMDK | RAW Format *string `type:"string"` // The ID of the EBS snapshot to be used for importing the snapshot. @@ -87094,7 +88904,7 @@ type ImportImageInput struct { // The name of the role to use when not using the default role, 'vmimport'. RoleName *string `type:"string"` - // The tags to apply to the image being imported. + // The tags to apply to the import image task during creation. TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } @@ -87286,7 +89096,7 @@ type ImportImageOutput struct { // A detailed status message of the import task. StatusMessage *string `locationName:"statusMessage" type:"string"` - // Any tags assigned to the image being imported. + // Any tags assigned to the import image task. Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } @@ -88091,7 +89901,7 @@ type ImportSnapshotInput struct { // The name of the role to use when not using the default role, 'vmimport'. RoleName *string `type:"string"` - // The tags to apply to the snapshot being imported. + // The tags to apply to the import snapshot task during creation. TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } @@ -88171,7 +89981,7 @@ type ImportSnapshotOutput struct { // Information about the import snapshot task. SnapshotTaskDetail *SnapshotTaskDetail `locationName:"snapshotTaskDetail" type:"structure"` - // Any tags assigned to the snapshot being imported. + // Any tags assigned to the import snapshot task. 
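A hedged sketch combining the account-level serial console types added above: read the current status and enable access only if it is currently disabled; the GetSerialConsoleAccessStatus and EnableSerialConsoleAccess client operations are assumed to be present in this SDK version.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// Assumed to exist alongside the types added in this revision.
	status, err := svc.GetSerialConsoleAccessStatus(&ec2.GetSerialConsoleAccessStatusInput{})
	if err != nil {
		fmt.Println("GetSerialConsoleAccessStatus failed:", err)
		return
	}

	if !aws.BoolValue(status.SerialConsoleAccessEnabled) {
		// Enable account-wide EC2 serial console access.
		enabled, err := svc.EnableSerialConsoleAccess(&ec2.EnableSerialConsoleAccessInput{})
		if err != nil {
			fmt.Println("EnableSerialConsoleAccess failed:", err)
			return
		}
		fmt.Println("serial console access enabled:", aws.BoolValue(enabled.SerialConsoleAccessEnabled))
		return
	}
	fmt.Println("serial console access already enabled")
}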
Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } @@ -88520,6 +90330,10 @@ type Instance struct { // Any block device mapping entries for the instance. BlockDeviceMappings []*InstanceBlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + // The boot mode of the instance. For more information, see Boot modes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html) + // in the Amazon EC2 User Guide. + BootMode *string `locationName:"bootMode" type:"string" enum:"BootModeValues"` + // The ID of the Capacity Reservation. CapacityReservationId *string `locationName:"capacityReservationId" type:"string"` @@ -88645,12 +90459,7 @@ type Instance struct { // The security groups for the instance. SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` - // Specifies whether to enable an instance launched in a VPC to perform NAT. - // This controls whether source/destination checking is enabled on the instance. - // A value of true means that checking is enabled, and false means that checking - // is disabled. The value must be false for the instance to perform NAT. For - // more information, see NAT instances (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) - // in the Amazon VPC User Guide. + // Indicates whether source/destination checking is enabled. SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"` // If the request is a Spot Instance request, the ID of the request. @@ -88710,6 +90519,12 @@ func (s *Instance) SetBlockDeviceMappings(v []*InstanceBlockDeviceMapping) *Inst return s } +// SetBootMode sets the BootMode field's value. +func (s *Instance) SetBootMode(v string) *Instance { + s.BootMode = &v + return s +} + // SetCapacityReservationId sets the CapacityReservationId field's value. func (s *Instance) SetCapacityReservationId(v string) *Instance { s.CapacityReservationId = &v @@ -89577,7 +91392,7 @@ type InstanceNetworkInterface struct { // One or more private IPv4 addresses associated with the network interface. PrivateIpAddresses []*InstancePrivateIpAddress `locationName:"privateIpAddressesSet" locationNameList:"item" type:"list"` - // Indicates whether to validate network traffic to or from this network interface. + // Indicates whether source/destination checking is enabled. SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"` // The status of the network interface. @@ -90570,6 +92385,10 @@ type InstanceTypeInfo struct { // Describes the processor. ProcessorInfo *ProcessorInfo `locationName:"processorInfo" type:"structure"` + // The supported boot modes. For more information, see Boot modes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html) + // in the Amazon EC2 User Guide. + SupportedBootModes []*string `locationName:"supportedBootModes" locationNameList:"item" type:"list"` + // The supported root device types. SupportedRootDeviceTypes []*string `locationName:"supportedRootDeviceTypes" locationNameList:"item" type:"list"` @@ -90707,6 +92526,12 @@ func (s *InstanceTypeInfo) SetProcessorInfo(v *ProcessorInfo) *InstanceTypeInfo return s } +// SetSupportedBootModes sets the SupportedBootModes field's value. +func (s *InstanceTypeInfo) SetSupportedBootModes(v []*string) *InstanceTypeInfo { + s.SupportedBootModes = v + return s +} + // SetSupportedRootDeviceTypes sets the SupportedRootDeviceTypes field's value. 
func (s *InstanceTypeInfo) SetSupportedRootDeviceTypes(v []*string) *InstanceTypeInfo { s.SupportedRootDeviceTypes = v @@ -90809,6 +92634,53 @@ func (s *InstanceUsage) SetUsedInstanceCount(v int64) *InstanceUsage { return s } +// Describes service integrations with VPC Flow logs. +type IntegrateServices struct { + _ struct{} `type:"structure"` + + // Information about the integration with Amazon Athena. + AthenaIntegrations []*AthenaIntegration `locationName:"AthenaIntegration" locationNameList:"item" min:"1" type:"list"` +} + +// String returns the string representation +func (s IntegrateServices) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IntegrateServices) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IntegrateServices) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntegrateServices"} + if s.AthenaIntegrations != nil && len(s.AthenaIntegrations) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AthenaIntegrations", 1)) + } + if s.AthenaIntegrations != nil { + for i, v := range s.AthenaIntegrations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AthenaIntegrations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAthenaIntegrations sets the AthenaIntegrations field's value. +func (s *IntegrateServices) SetAthenaIntegrations(v []*AthenaIntegration) *IntegrateServices { + s.AthenaIntegrations = v + return s +} + // Describes an internet gateway. type InternetGateway struct { _ struct{} `type:"structure"` @@ -91606,8 +93478,7 @@ type LaunchTemplateBlockDeviceMapping struct { // Information about the block device for an EBS volume. Ebs *LaunchTemplateEbsBlockDevice `locationName:"ebs" type:"structure"` - // Suppresses the specified device included in the block device mapping of the - // AMI. + // To omit the device from the block device mapping, specify an empty string. NoDevice *string `locationName:"noDevice" type:"string"` // The virtual device name (ephemeralN). @@ -91659,8 +93530,7 @@ type LaunchTemplateBlockDeviceMappingRequest struct { // launched. Ebs *LaunchTemplateEbsBlockDeviceRequest `type:"structure"` - // Suppresses the specified device included in the block device mapping of the - // AMI. + // To omit the device from the block device mapping, specify an empty string. NoDevice *string `type:"string"` // The virtual device name (ephemeralN). Instance store volumes are numbered @@ -92025,9 +93895,8 @@ type LaunchTemplateEbsBlockDeviceRequest struct { // on the Nitro System (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances). // Other instance families guarantee performance up to 32,000 IOPS. // - // This parameter is required for io1 and io2 volumes. The default for gp3 volumes - // is 3,000 IOPS. This parameter is not supported for gp2, st1, sc1, or standard - // volumes. + // This parameter is supported for io1, io2, and gp3 volumes only. This parameter + // is not supported for gp2, st1, sc1, or standard volumes. Iops *int64 `type:"integer"` // The ARN of the symmetric AWS Key Management Service (AWS KMS) CMK used for @@ -92043,11 +93912,8 @@ type LaunchTemplateEbsBlockDeviceRequest struct { Throughput *int64 `type:"integer"` // The size of the volume, in GiBs. 
You must specify either a snapshot ID or - // a volume size. If you specify a snapshot, the default is the snapshot size. - // You can specify a volume size that is equal to or larger than the snapshot - // size. - // - // The following are the supported volumes sizes for each volume type: + // a volume size. The following are the supported volumes sizes for each volume + // type: // // * gp2 and gp3: 1-16,384 // @@ -92058,8 +93924,7 @@ type LaunchTemplateEbsBlockDeviceRequest struct { // * standard: 1-1,024 VolumeSize *int64 `type:"integer"` - // The volume type. The default is gp2. For more information, see Amazon EBS - // volume types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // The volume type. For more information, see Amazon EBS volume types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) // in the Amazon Elastic Compute Cloud User Guide. VolumeType *string `type:"string" enum:"VolumeType"` } @@ -92972,12 +94837,20 @@ type LaunchTemplateOverrides struct { // The instance type. InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` - // The priority for the launch template override. If OnDemandAllocationStrategy - // is set to prioritized, Spot Fleet uses priority to determine which launch - // template override to use first in fulfilling On-Demand capacity. The highest - // priority is launched first. Valid values are whole numbers starting at 0. - // The lower the number, the higher the priority. If no number is set, the launch - // template override has the lowest priority. + // The priority for the launch template override. The highest priority is launched + // first. + // + // If OnDemandAllocationStrategy is set to prioritized, Spot Fleet uses priority + // to determine which launch template override to use first in fulfilling On-Demand + // capacity. + // + // If the Spot AllocationStrategy is set to capacityOptimizedPrioritized, Spot + // Fleet uses priority on a best-effort basis to determine which launch template + // override to use in fulfilling Spot capacity, but optimizes for capacity first. + // + // Valid values are whole numbers starting at 0. The lower the number, the higher + // the priority. If no number is set, the launch template override has the lowest + // priority. You can set the same priority for different launch template overrides. Priority *float64 `locationName:"priority" type:"double"` // The maximum price per unit hour that you are willing to pay for a Spot Instance. @@ -95876,9 +97749,10 @@ type ModifyInstanceAttributeInput struct { // a PV instance can make it unreachable. EnaSupport *AttributeBooleanValue `locationName:"enaSupport" type:"structure"` - // [EC2-VPC] Changes the security groups of the instance. You must specify at - // least one security group, even if it's just the default security group for - // the VPC. You must specify the security group ID, not the security group name. + // [EC2-VPC] Replaces the security groups of the instance with the specified + // security groups. You must specify at least one security group, even if it's + // just the default security group for the VPC. You must specify the security + // group ID, not the security group name. Groups []*string `locationName:"GroupId" locationNameList:"groupId" type:"list"` // The ID of the instance. @@ -95906,9 +97780,12 @@ type ModifyInstanceAttributeInput struct { // PV-GRUB (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html). 
Ramdisk *AttributeValue `locationName:"ramdisk" type:"structure"` - // Specifies whether source/destination checking is enabled. A value of true - // means that checking is enabled, and false means that checking is disabled. - // This value must be false for a NAT instance to perform NAT. + // Enable or disable source/destination checks, which ensure that the instance + // is either the source or the destination of any traffic that it receives. + // If the value is true, source/destination checks are enabled; otherwise, they + // are disabled. The default value is true. You must disable source/destination + // checks if the instance runs services such as network address translation, + // routing, or firewalls. SourceDestCheck *AttributeBooleanValue `type:"structure"` // Set to simple to enable enhanced networking with the Intel 82599 Virtual @@ -96857,11 +98734,12 @@ type ModifyNetworkInterfaceAttributeInput struct { // NetworkInterfaceId is a required field NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` - // Indicates whether source/destination checking is enabled. A value of true - // means checking is enabled, and false means checking is disabled. This value - // must be false for a NAT instance to perform NAT. For more information, see - // NAT Instances (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) - // in the Amazon Virtual Private Cloud User Guide. + // Enable or disable source/destination checks, which ensure that the instance + // is either the source or the destination of any traffic that it receives. + // If the value is true, source/destination checks are enabled; otherwise, they + // are disabled. The default value is true. You must disable source/destination + // checks if the instance runs services such as network address translation, + // routing, or firewalls. SourceDestCheck *AttributeBooleanValue `locationName:"sourceDestCheck" type:"structure"` } @@ -98293,7 +100171,8 @@ type ModifyVolumeInput struct { // // * io2: 100-64,000 IOPS // - // Default: If no IOPS value is specified, the existing value is retained. + // Default: If no IOPS value is specified, the existing value is retained, unless + // a volume type is modified that supports different values. Iops *int64 `type:"integer"` // Specifies whether to enable Amazon EBS Multi-Attach. If you enable Multi-Attach, @@ -98630,8 +100509,6 @@ type ModifyVpcEndpointInput struct { // (Interface endpoint) Indicates whether a private hosted zone is associated // with the VPC. - // - // Private DNS is not supported for Amazon S3 interface endpoints. PrivateDnsEnabled *bool `type:"boolean"` // (Gateway endpoint) One or more route table IDs to disassociate from the endpoint. @@ -100536,6 +102413,9 @@ type NetworkInfo struct { // The index of the default network card, starting at 0. DefaultNetworkCardIndex *int64 `locationName:"defaultNetworkCardIndex" type:"integer"` + // Describes the Elastic Fabric Adapters for the instance type. + EfaInfo *EfaInfo `locationName:"efaInfo" type:"structure"` + // Indicates whether Elastic Fabric Adapter (EFA) is supported. EfaSupported *bool `locationName:"efaSupported" type:"boolean"` @@ -100581,6 +102461,12 @@ func (s *NetworkInfo) SetDefaultNetworkCardIndex(v int64) *NetworkInfo { return s } +// SetEfaInfo sets the EfaInfo field's value. +func (s *NetworkInfo) SetEfaInfo(v *EfaInfo) *NetworkInfo { + s.EfaInfo = v + return s +} + // SetEfaSupported sets the EfaSupported field's value. 
func (s *NetworkInfo) SetEfaSupported(v bool) *NetworkInfo { s.EfaSupported = &v @@ -100920,14 +102806,14 @@ type NetworkInterface struct { // The private IPv4 addresses associated with the network interface. PrivateIpAddresses []*NetworkInterfacePrivateIpAddress `locationName:"privateIpAddressesSet" locationNameList:"item" type:"list"` - // The ID of the entity that launched the instance on your behalf (for example, - // AWS Management Console or Auto Scaling). + // The alias or AWS account ID of the principal or service that created the + // network interface. RequesterId *string `locationName:"requesterId" type:"string"` // Indicates whether the network interface is being managed by AWS. RequesterManaged *bool `locationName:"requesterManaged" type:"boolean"` - // Indicates whether traffic to or from the instance is validated. + // Indicates whether source/destination checking is enabled. SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"` // The status of the network interface. @@ -104111,6 +105997,9 @@ type RegisterImageInput struct { // The block device mapping entries. // + // If you specify an EBS volume using the ID of an EBS snapshot, you can't specify + // the encryption state of the volume. + // // If you create an AMI on an Outpost, then all backing snapshots must be on // the same Outpost or in the Region of that Outpost. AMIs on an Outpost that // include local snapshots can be used to launch instances on the same Outpost @@ -104118,6 +106007,10 @@ type RegisterImageInput struct { // in the Amazon Elastic Compute Cloud User Guide. BlockDeviceMappings []*BlockDeviceMapping `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"` + // The boot mode of the AMI. For more information, see Boot modes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html) + // in the Amazon Elastic Compute Cloud User Guide. + BootMode *string `type:"string" enum:"BootModeValues"` + // A description for your AMI. Description *string `locationName:"description" type:"string"` @@ -104215,6 +106108,12 @@ func (s *RegisterImageInput) SetBlockDeviceMappings(v []*BlockDeviceMapping) *Re return s } +// SetBootMode sets the BootMode field's value. +func (s *RegisterImageInput) SetBootMode(v string) *RegisterImageInput { + s.BootMode = &v + return s +} + // SetDescription sets the Description field's value. func (s *RegisterImageInput) SetDescription(v string) *RegisterImageInput { s.Description = &v @@ -105444,6 +107343,94 @@ func (s ReplaceNetworkAclEntryOutput) GoString() string { return s.String() } +// Information about a root volume replacement task. +type ReplaceRootVolumeTask struct { + _ struct{} `type:"structure"` + + // The time the task completed. + CompleteTime *string `locationName:"completeTime" type:"string"` + + // The ID of the instance for which the root volume replacement task was created. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The ID of the root volume replacement task. + ReplaceRootVolumeTaskId *string `locationName:"replaceRootVolumeTaskId" type:"string"` + + // The time the task was started. + StartTime *string `locationName:"startTime" type:"string"` + + // The tags assigned to the task. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The state of the task. The task can be in one of the following states: + // + // * pending - the replacement volume is being created. 
+ // + // * in-progress - the original volume is being detached and the replacement + // volume is being attached. + // + // * succeeded - the replacement volume has been successfully attached to + // the instance and the instance is available. + // + // * failing - the replacement task is in the process of failing. + // + // * failed - the replacement task has failed but the original root volume + // is still attached. + // + // * failing-detached - the replacement task is in the process of failing. + // The instance might have no root volume attached. + // + // * failed-detached - the replacement task has failed and the instance has + // no root volume attached. + TaskState *string `locationName:"taskState" type:"string" enum:"ReplaceRootVolumeTaskState"` +} + +// String returns the string representation +func (s ReplaceRootVolumeTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceRootVolumeTask) GoString() string { + return s.String() +} + +// SetCompleteTime sets the CompleteTime field's value. +func (s *ReplaceRootVolumeTask) SetCompleteTime(v string) *ReplaceRootVolumeTask { + s.CompleteTime = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *ReplaceRootVolumeTask) SetInstanceId(v string) *ReplaceRootVolumeTask { + s.InstanceId = &v + return s +} + +// SetReplaceRootVolumeTaskId sets the ReplaceRootVolumeTaskId field's value. +func (s *ReplaceRootVolumeTask) SetReplaceRootVolumeTaskId(v string) *ReplaceRootVolumeTask { + s.ReplaceRootVolumeTaskId = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *ReplaceRootVolumeTask) SetStartTime(v string) *ReplaceRootVolumeTask { + s.StartTime = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ReplaceRootVolumeTask) SetTags(v []*Tag) *ReplaceRootVolumeTask { + s.Tags = v + return s +} + +// SetTaskState sets the TaskState field's value. +func (s *ReplaceRootVolumeTask) SetTaskState(v string) *ReplaceRootVolumeTask { + s.TaskState = &v + return s +} + type ReplaceRouteInput struct { _ struct{} `type:"structure"` @@ -106033,7 +108020,7 @@ type RequestLaunchTemplateData struct { // in the Amazon Elastic Compute Cloud User Guide. HibernationOptions *LaunchTemplateHibernationOptionsRequest `type:"structure"` - // The IAM instance profile. + // The name or Amazon Resource Name (ARN) of an IAM instance profile. IamInstanceProfile *LaunchTemplateIamInstanceProfileSpecificationRequest `type:"structure"` // The ID of the AMI. @@ -106697,9 +108684,7 @@ type RequestSpotLaunchSpecification struct { // you can specify the names or the IDs of the security groups. SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"item" type:"list"` - // The IDs of the subnets in which to launch the instance. To specify multiple - // subnets, separate them using commas; for example, "subnet-1234abcdeexample1, - // subnet-0987cdef6example2". + // The ID of the subnet in which to launch the instance. SubnetId *string `locationName:"subnetId" type:"string"` // The Base64-encoded user data for the instance. User data is limited to 16 @@ -109524,7 +111509,7 @@ type RunInstancesInput struct { // You can't enable hibernation and AWS Nitro Enclaves on the same instance. HibernationOptions *HibernationOptionsRequest `type:"structure"` - // The IAM instance profile. + // The name or Amazon Resource Name (ARN) of an IAM instance profile. 
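A sketch of registering an AMI with the new BootMode field and BootModeValues enum added in this revision; RegisterImage is a long-standing EC2 operation, while the image name and snapshot ID below are placeholders.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// Register an AMI from an existing EBS snapshot and opt in to UEFI boot
	// via the BootMode field added in this revision.
	input := &ec2.RegisterImageInput{
		Name:               aws.String("example-uefi-image"), // placeholder
		Architecture:       aws.String("x86_64"),
		RootDeviceName:     aws.String("/dev/xvda"),
		VirtualizationType: aws.String("hvm"),
		BootMode:           aws.String(ec2.BootModeValuesUefi),
		BlockDeviceMappings: []*ec2.BlockDeviceMapping{{
			DeviceName: aws.String("/dev/xvda"),
			Ebs:        &ec2.EbsBlockDevice{SnapshotId: aws.String("snap-0123456789abcdef0")}, // placeholder
		}},
	}

	out, err := svc.RegisterImage(input)
	if err != nil {
		fmt.Println("RegisterImage failed:", err)
		return
	}
	fmt.Println("registered image:", aws.StringValue(out.ImageId))
}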
IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"` // The ID of the AMI. An AMI ID is required to launch an instance and must be @@ -110122,6 +112107,47 @@ func (s *RunScheduledInstancesOutput) SetInstanceIdSet(v []*string) *RunSchedule return s } +// The tags to apply to the AMI object that will be stored in the S3 bucket. +// For more information, see Categorizing your storage using tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html) +// in the Amazon Simple Storage Service User Guide. +type S3ObjectTag struct { + _ struct{} `type:"structure"` + + // The key of the tag. + // + // Constraints: Tag keys are case-sensitive and can be up to 128 Unicode characters + // in length. May not begin with aws:. + Key *string `type:"string"` + + // The value of the tag. + // + // Constraints: Tag values are case-sensitive and can be up to 256 Unicode characters + // in length. + Value *string `type:"string"` +} + +// String returns the string representation +func (s S3ObjectTag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3ObjectTag) GoString() string { + return s.String() +} + +// SetKey sets the Key field's value. +func (s *S3ObjectTag) SetKey(v string) *S3ObjectTag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *S3ObjectTag) SetValue(v string) *S3ObjectTag { + s.Value = &v + return s +} + // Describes the storage parameters for S3 and S3 buckets for an instance store-backed // AMI. type S3Storage struct { @@ -112468,7 +114494,7 @@ type SnapshotDiskContainer struct { // The format of the disk image being imported. // - // Valid values: VHD | VMDK + // Valid values: VHD | VMDK | RAW Format *string `type:"string"` // The URL to the Amazon S3-based disk image being imported. It can either be @@ -113165,9 +115191,16 @@ type SpotFleetRequestConfigData struct { // If the allocation strategy is diversified, Spot Fleet launches instances // from all the Spot Instance pools that you specify. // - // If the allocation strategy is capacityOptimized, Spot Fleet launches instances - // from Spot Instance pools with optimal capacity for the number of instances - // that are launching. + // If the allocation strategy is capacityOptimized (recommended), Spot Fleet + // launches instances from Spot Instance pools with optimal capacity for the + // number of instances that are launching. To give certain instance types a + // higher chance of launching first, use capacityOptimizedPrioritized. Set a + // priority for each instance type by using the Priority parameter for LaunchTemplateOverrides. + // You can assign the same priority to different LaunchTemplateOverrides. EC2 + // implements the priorities on a best-effort basis, but optimizes for capacity + // first. capacityOptimizedPrioritized is supported only if your Spot Fleet + // uses a launch template. Note that if the OnDemandAllocationStrategy is set + // to prioritized, the same priority is applied when fulfilling On-Demand capacity. AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"AllocationStrategy"` // A unique, case-sensitive identifier that you provide to ensure the idempotency @@ -113943,9 +115976,16 @@ type SpotOptions struct { // If the allocation strategy is diversified, EC2 Fleet launches instances from // all of the Spot Instance pools that you specify. 
// - // If the allocation strategy is capacity-optimized, EC2 Fleet launches instances - // from Spot Instance pools with optimal capacity for the number of instances - // that are launching. + // If the allocation strategy is capacity-optimized (recommended), EC2 Fleet + // launches instances from Spot Instance pools with optimal capacity for the + // number of instances that are launching. To give certain instance types a + // higher chance of launching first, use capacity-optimized-prioritized. Set + // a priority for each instance type by using the Priority parameter for LaunchTemplateOverrides. + // You can assign the same priority to different LaunchTemplateOverrides. EC2 + // implements the priorities on a best-effort basis, but optimizes for capacity + // first. capacity-optimized-prioritized is supported only if your fleet uses + // a launch template. Note that if the On-Demand AllocationStrategy is set to + // prioritized, the same priority is applied when fulfilling On-Demand capacity. AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"SpotAllocationStrategy"` // The behavior when a Spot Instance is interrupted. The default is terminate. @@ -114049,9 +116089,16 @@ type SpotOptionsRequest struct { // If the allocation strategy is diversified, EC2 Fleet launches instances from // all of the Spot Instance pools that you specify. // - // If the allocation strategy is capacity-optimized, EC2 Fleet launches instances - // from Spot Instance pools with optimal capacity for the number of instances - // that are launching. + // If the allocation strategy is capacity-optimized (recommended), EC2 Fleet + // launches instances from Spot Instance pools with optimal capacity for the + // number of instances that are launching. To give certain instance types a + // higher chance of launching first, use capacity-optimized-prioritized. Set + // a priority for each instance type by using the Priority parameter for LaunchTemplateOverrides. + // You can assign the same priority to different LaunchTemplateOverrides. EC2 + // implements the priorities on a best-effort basis, but optimizes for capacity + // first. capacity-optimized-prioritized is supported only if your fleet uses + // a launch template. Note that if the On-Demand AllocationStrategy is set to + // prioritized, the same priority is applied when fulfilling On-Demand capacity. AllocationStrategy *string `type:"string" enum:"SpotAllocationStrategy"` // The behavior when a Spot Instance is interrupted. The default is terminate. @@ -114876,6 +116923,85 @@ func (s *StorageLocation) SetKey(v string) *StorageLocation { return s } +// The information about the AMI store task, including the progress of the task. +type StoreImageTaskResult struct { + _ struct{} `type:"structure"` + + // The ID of the AMI that is being stored. + AmiId *string `locationName:"amiId" type:"string"` + + // The name of the S3 bucket that contains the stored AMI object. + Bucket *string `locationName:"bucket" type:"string"` + + // The progress of the task as a percentage. + ProgressPercentage *int64 `locationName:"progressPercentage" type:"integer"` + + // The name of the stored AMI object in the bucket. + S3objectKey *string `locationName:"s3objectKey" type:"string"` + + // If the tasks fails, the reason for the failure is returned. If the task succeeds, + // null is returned. + StoreTaskFailureReason *string `locationName:"storeTaskFailureReason" type:"string"` + + // The state of the store task (InProgress, Completed, or Failed). 
+ StoreTaskState *string `locationName:"storeTaskState" type:"string"` + + // The time the task started. + TaskStartTime *time.Time `locationName:"taskStartTime" type:"timestamp"` +} + +// String returns the string representation +func (s StoreImageTaskResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StoreImageTaskResult) GoString() string { + return s.String() +} + +// SetAmiId sets the AmiId field's value. +func (s *StoreImageTaskResult) SetAmiId(v string) *StoreImageTaskResult { + s.AmiId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *StoreImageTaskResult) SetBucket(v string) *StoreImageTaskResult { + s.Bucket = &v + return s +} + +// SetProgressPercentage sets the ProgressPercentage field's value. +func (s *StoreImageTaskResult) SetProgressPercentage(v int64) *StoreImageTaskResult { + s.ProgressPercentage = &v + return s +} + +// SetS3objectKey sets the S3objectKey field's value. +func (s *StoreImageTaskResult) SetS3objectKey(v string) *StoreImageTaskResult { + s.S3objectKey = &v + return s +} + +// SetStoreTaskFailureReason sets the StoreTaskFailureReason field's value. +func (s *StoreImageTaskResult) SetStoreTaskFailureReason(v string) *StoreImageTaskResult { + s.StoreTaskFailureReason = &v + return s +} + +// SetStoreTaskState sets the StoreTaskState field's value. +func (s *StoreImageTaskResult) SetStoreTaskState(v string) *StoreImageTaskResult { + s.StoreTaskState = &v + return s +} + +// SetTaskStartTime sets the TaskStartTime field's value. +func (s *StoreImageTaskResult) SetTaskStartTime(v time.Time) *StoreImageTaskResult { + s.TaskStartTime = &v + return s +} + // Describes a subnet. type Subnet struct { _ struct{} `type:"structure"` @@ -122009,6 +124135,9 @@ const ( // AllocationStrategyCapacityOptimized is a AllocationStrategy enum value AllocationStrategyCapacityOptimized = "capacityOptimized" + + // AllocationStrategyCapacityOptimizedPrioritized is a AllocationStrategy enum value + AllocationStrategyCapacityOptimizedPrioritized = "capacityOptimizedPrioritized" ) // AllocationStrategy_Values returns all elements of the AllocationStrategy enum @@ -122017,6 +124146,7 @@ func AllocationStrategy_Values() []string { AllocationStrategyLowestPrice, AllocationStrategyDiversified, AllocationStrategyCapacityOptimized, + AllocationStrategyCapacityOptimizedPrioritized, } } @@ -122320,6 +124450,38 @@ func BgpStatus_Values() []string { } } +const ( + // BootModeTypeLegacyBios is a BootModeType enum value + BootModeTypeLegacyBios = "legacy-bios" + + // BootModeTypeUefi is a BootModeType enum value + BootModeTypeUefi = "uefi" +) + +// BootModeType_Values returns all elements of the BootModeType enum +func BootModeType_Values() []string { + return []string{ + BootModeTypeLegacyBios, + BootModeTypeUefi, + } +} + +const ( + // BootModeValuesLegacyBios is a BootModeValues enum value + BootModeValuesLegacyBios = "legacy-bios" + + // BootModeValuesUefi is a BootModeValues enum value + BootModeValuesUefi = "uefi" +) + +// BootModeValues_Values returns all elements of the BootModeValues enum +func BootModeValues_Values() []string { + return []string{ + BootModeValuesLegacyBios, + BootModeValuesUefi, + } +} + const ( // BundleTaskStatePending is a BundleTaskState enum value BundleTaskStatePending = "pending" @@ -123673,6 +125835,9 @@ const ( // ImageAttributeNameSriovNetSupport is a ImageAttributeName enum value ImageAttributeNameSriovNetSupport = "sriovNetSupport" + + // ImageAttributeNameBootMode is a 
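
A small, hedged sketch of reading the StoreImageTaskResult fields defined above, for example from a DescribeStoreImageTasks response (that call lies outside this excerpt); only fields shown in the hunk and standard aws helpers are used, and the sample values are placeholders.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// reportStoreTask prints a one-line summary of an AMI store task.
// The state strings (InProgress, Completed, Failed) come from the field docs above.
func reportStoreTask(t *ec2.StoreImageTaskResult) {
	fmt.Printf("AMI %s -> s3://%s/%s: %s (%d%%)\n",
		aws.StringValue(t.AmiId),
		aws.StringValue(t.Bucket),
		aws.StringValue(t.S3objectKey),
		aws.StringValue(t.StoreTaskState),
		aws.Int64Value(t.ProgressPercentage))
	if reason := aws.StringValue(t.StoreTaskFailureReason); reason != "" {
		fmt.Println("failure reason:", reason)
	}
}

func main() {
	reportStoreTask(&ec2.StoreImageTaskResult{
		AmiId:              aws.String("ami-0123456789abcdef0"), // placeholder
		Bucket:             aws.String("my-ami-store"),          // placeholder
		S3objectKey:        aws.String("ami-0123456789abcdef0.bin"),
		StoreTaskState:     aws.String("Completed"),
		ProgressPercentage: aws.Int64(100),
	})
}
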
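
Likewise, a sketch of opting into the prioritized allocation strategies documented earlier in this diff; only the AllocationStrategy fields are set, and the remaining required fleet/request fields are omitted for brevity.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Spot Fleet: capacityOptimizedPrioritized is honored only when the fleet
	// uses a launch template with Priority set on its LaunchTemplateOverrides
	// (per the documentation above).
	fleetCfg := &ec2.SpotFleetRequestConfigData{
		AllocationStrategy: aws.String(ec2.AllocationStrategyCapacityOptimizedPrioritized),
	}

	// EC2 Fleet: the hyphenated counterpart on SpotOptionsRequest.
	spotOpts := &ec2.SpotOptionsRequest{
		AllocationStrategy: aws.String(ec2.SpotAllocationStrategyCapacityOptimizedPrioritized),
	}

	fmt.Println(fleetCfg, spotOpts)
}
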
ImageAttributeName enum value + ImageAttributeNameBootMode = "bootMode" ) // ImageAttributeName_Values returns all elements of the ImageAttributeName enum @@ -123685,6 +125850,7 @@ func ImageAttributeName_Values() []string { ImageAttributeNameProductCodes, ImageAttributeNameBlockDeviceMapping, ImageAttributeNameSriovNetSupport, + ImageAttributeNameBootMode, } } @@ -125127,6 +127293,33 @@ const ( // InstanceTypeMac1Metal is a InstanceType enum value InstanceTypeMac1Metal = "mac1.metal" + + // InstanceTypeX2gdMedium is a InstanceType enum value + InstanceTypeX2gdMedium = "x2gd.medium" + + // InstanceTypeX2gdLarge is a InstanceType enum value + InstanceTypeX2gdLarge = "x2gd.large" + + // InstanceTypeX2gdXlarge is a InstanceType enum value + InstanceTypeX2gdXlarge = "x2gd.xlarge" + + // InstanceTypeX2gd2xlarge is a InstanceType enum value + InstanceTypeX2gd2xlarge = "x2gd.2xlarge" + + // InstanceTypeX2gd4xlarge is a InstanceType enum value + InstanceTypeX2gd4xlarge = "x2gd.4xlarge" + + // InstanceTypeX2gd8xlarge is a InstanceType enum value + InstanceTypeX2gd8xlarge = "x2gd.8xlarge" + + // InstanceTypeX2gd12xlarge is a InstanceType enum value + InstanceTypeX2gd12xlarge = "x2gd.12xlarge" + + // InstanceTypeX2gd16xlarge is a InstanceType enum value + InstanceTypeX2gd16xlarge = "x2gd.16xlarge" + + // InstanceTypeX2gdMetal is a InstanceType enum value + InstanceTypeX2gdMetal = "x2gd.metal" ) // InstanceType_Values returns all elements of the InstanceType enum @@ -125521,6 +127714,15 @@ func InstanceType_Values() []string { InstanceTypeM6gd12xlarge, InstanceTypeM6gd16xlarge, InstanceTypeMac1Metal, + InstanceTypeX2gdMedium, + InstanceTypeX2gdLarge, + InstanceTypeX2gdXlarge, + InstanceTypeX2gd2xlarge, + InstanceTypeX2gd4xlarge, + InstanceTypeX2gd8xlarge, + InstanceTypeX2gd12xlarge, + InstanceTypeX2gd16xlarge, + InstanceTypeX2gdMetal, } } @@ -126096,6 +128298,30 @@ func OperationType_Values() []string { } } +const ( + // PartitionLoadFrequencyNone is a PartitionLoadFrequency enum value + PartitionLoadFrequencyNone = "none" + + // PartitionLoadFrequencyDaily is a PartitionLoadFrequency enum value + PartitionLoadFrequencyDaily = "daily" + + // PartitionLoadFrequencyWeekly is a PartitionLoadFrequency enum value + PartitionLoadFrequencyWeekly = "weekly" + + // PartitionLoadFrequencyMonthly is a PartitionLoadFrequency enum value + PartitionLoadFrequencyMonthly = "monthly" +) + +// PartitionLoadFrequency_Values returns all elements of the PartitionLoadFrequency enum +func PartitionLoadFrequency_Values() []string { + return []string{ + PartitionLoadFrequencyNone, + PartitionLoadFrequencyDaily, + PartitionLoadFrequencyWeekly, + PartitionLoadFrequencyMonthly, + } +} + const ( // PaymentOptionAllUpfront is a PaymentOption enum value PaymentOptionAllUpfront = "AllUpfront" @@ -126372,6 +128598,38 @@ func RecurringChargeFrequency_Values() []string { } } +const ( + // ReplaceRootVolumeTaskStatePending is a ReplaceRootVolumeTaskState enum value + ReplaceRootVolumeTaskStatePending = "pending" + + // ReplaceRootVolumeTaskStateInProgress is a ReplaceRootVolumeTaskState enum value + ReplaceRootVolumeTaskStateInProgress = "in-progress" + + // ReplaceRootVolumeTaskStateFailing is a ReplaceRootVolumeTaskState enum value + ReplaceRootVolumeTaskStateFailing = "failing" + + // ReplaceRootVolumeTaskStateSucceeded is a ReplaceRootVolumeTaskState enum value + ReplaceRootVolumeTaskStateSucceeded = "succeeded" + + // ReplaceRootVolumeTaskStateFailed is a ReplaceRootVolumeTaskState enum value + ReplaceRootVolumeTaskStateFailed = 
"failed" + + // ReplaceRootVolumeTaskStateFailedDetached is a ReplaceRootVolumeTaskState enum value + ReplaceRootVolumeTaskStateFailedDetached = "failed-detached" +) + +// ReplaceRootVolumeTaskState_Values returns all elements of the ReplaceRootVolumeTaskState enum +func ReplaceRootVolumeTaskState_Values() []string { + return []string{ + ReplaceRootVolumeTaskStatePending, + ReplaceRootVolumeTaskStateInProgress, + ReplaceRootVolumeTaskStateFailing, + ReplaceRootVolumeTaskStateSucceeded, + ReplaceRootVolumeTaskStateFailed, + ReplaceRootVolumeTaskStateFailedDetached, + } +} + const ( // ReplacementStrategyLaunch is a ReplacementStrategy enum value ReplacementStrategyLaunch = "launch" @@ -126957,6 +129215,9 @@ const ( // SpotAllocationStrategyCapacityOptimized is a SpotAllocationStrategy enum value SpotAllocationStrategyCapacityOptimized = "capacity-optimized" + + // SpotAllocationStrategyCapacityOptimizedPrioritized is a SpotAllocationStrategy enum value + SpotAllocationStrategyCapacityOptimizedPrioritized = "capacity-optimized-prioritized" ) // SpotAllocationStrategy_Values returns all elements of the SpotAllocationStrategy enum @@ -126965,6 +129226,7 @@ func SpotAllocationStrategy_Values() []string { SpotAllocationStrategyLowestPrice, SpotAllocationStrategyDiversified, SpotAllocationStrategyCapacityOptimized, + SpotAllocationStrategyCapacityOptimizedPrioritized, } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go index 31c314e0e5f..47c44cc9df5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go @@ -4,8 +4,14 @@ // requests to Amazon Elastic Compute Cloud. // // Amazon Elastic Compute Cloud (Amazon EC2) provides secure and resizable computing -// capacity in the AWS cloud. Using Amazon EC2 eliminates the need to invest +// capacity in the AWS Cloud. Using Amazon EC2 eliminates the need to invest // in hardware up front, so you can develop and deploy applications faster. +// Amazon Virtual Private Cloud (Amazon VPC) enables you to provision a logically +// isolated section of the AWS Cloud where you can launch AWS resources in a +// virtual network that you've defined. Amazon Elastic Block Store (Amazon EBS) +// provides block level storage volumes for use with EC2 instances. EBS volumes +// are highly available and reliable storage volumes that can be attached to +// any running instance and used like a hard drive. 
// // To learn more, see the following resources: // @@ -13,7 +19,7 @@ // EC2 documentation (http://aws.amazon.com/documentation/ec2) // // * Amazon EBS: Amazon EBS product page (http://aws.amazon.com/ebs), Amazon -// EBS documentation (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AmazonEBS.html) +// EBS documentation (http://aws.amazon.com/documentation/ebs) // // * Amazon VPC: Amazon VPC product page (http://aws.amazon.com/vpc), Amazon // VPC documentation (http://aws.amazon.com/documentation/vpc) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go index b9bdbde157f..15b26e741d2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go @@ -982,7 +982,7 @@ func (c *EC2) WaitUntilSecurityGroupExistsWithContext(ctx aws.Context, input *De { State: request.RetryWaiterState, Matcher: request.ErrorWaiterMatch, - Expected: "InvalidGroupNotFound", + Expected: "InvalidGroup.NotFound", }, }, Logger: c.Config.Logger, diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/api.go b/vendor/github.com/aws/aws-sdk-go/service/iam/api.go index 72e374e2bd2..60b726d0615 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iam/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iam/api.go @@ -362,6 +362,10 @@ func (c *IAM) AttachGroupPolicyRequest(input *AttachGroupPolicyInput) (req *requ // You use this operation to attach a managed policy to a group. To embed an // inline policy in a group, use PutGroupPolicy. // +// As a best practice, you can validate your IAM policies. To learn more, see +// Validating IAM policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_policy-validator.html) +// in the IAM User Guide. +// // For more information about policies, see Managed policies and inline policies // (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. @@ -474,6 +478,10 @@ func (c *IAM) AttachRolePolicyRequest(input *AttachRolePolicyInput) (req *reques // see Managed policies and inline policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. // +// As a best practice, you can validate your IAM policies. To learn more, see +// Validating IAM policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_policy-validator.html) +// in the IAM User Guide. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -580,6 +588,10 @@ func (c *IAM) AttachUserPolicyRequest(input *AttachUserPolicyInput) (req *reques // You use this operation to attach a managed policy to a user. To embed an // inline policy in a user, use PutUserPolicy. // +// As a best practice, you can validate your IAM policies. To learn more, see +// Validating IAM policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_policy-validator.html) +// in the IAM User Guide. +// // For more information about policies, see Managed policies and inline policies // (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. @@ -1082,7 +1094,9 @@ func (c *IAM) CreateInstanceProfileRequest(input *CreateInstanceProfileInput) (r // CreateInstanceProfile API operation for AWS Identity and Access Management. // // Creates a new instance profile. 
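
Returning to the waiters.go change above: with the matcher corrected to the real EC2 error code InvalidGroup.NotFound, the existing waiter retries as intended instead of failing immediately. A hedged usage sketch; the security group ID and default session are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// The waiter now treats "InvalidGroup.NotFound" as a retryable condition.
	input := &ec2.DescribeSecurityGroupsInput{
		GroupIds: aws.StringSlice([]string{"sg-0123456789abcdef0"}), // placeholder ID
	}
	if err := svc.WaitUntilSecurityGroupExists(input); err != nil {
		log.Fatalf("security group did not appear: %v", err)
	}
	log.Println("security group exists")
}
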
For information about instance profiles, -// see About instance profiles (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-quotas-entities). +// see Using roles for applications on Amazon EC2 (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html) +// in the IAM User Guide, and Instance profiles (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#ec2-instance-profile) +// in the Amazon EC2 User Guide. // // For information about the number of instance profiles you can create, see // IAM object quotas (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html) @@ -1414,6 +1428,10 @@ func (c *IAM) CreatePolicyRequest(input *CreatePolicyInput) (req *request.Reques // versions, see Versioning for managed policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) // in the IAM User Guide. // +// As a best practice, you can validate your IAM policies. To learn more, see +// Validating IAM policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_policy-validator.html) +// in the IAM User Guide. +// // For more information about managed policies in general, see Managed policies // and inline policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. @@ -5473,6 +5491,12 @@ func (c *IAM) GetAccessKeyLastUsedRequest(input *GetAccessKeyLastUsedInput) (req // // See the AWS API reference guide for AWS Identity and Access Management's // API operation GetAccessKeyLastUsed for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetAccessKeyLastUsed func (c *IAM) GetAccessKeyLastUsed(input *GetAccessKeyLastUsedInput) (*GetAccessKeyLastUsedOutput, error) { req, out := c.GetAccessKeyLastUsedRequest(input) diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/api.go b/vendor/github.com/aws/aws-sdk-go/service/route53/api.go index 34b282735ed..70f7d97d43b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/route53/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/route53/api.go @@ -746,6 +746,9 @@ func (c *Route53) CreateHostedZoneRequest(input *CreateHostedZoneInput) (req *re // records are not yet available on all Route 53 DNS servers. When the NS and // SOA records are available, the status of the zone changes to INSYNC. // +// The CreateHostedZone request requires the caller to have an ec2:DescribeVpcs +// permission. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2121,7 +2124,7 @@ func (c *Route53) DeleteKeySigningKeyRequest(input *DeleteKeySigningKeyInput) (r // DeleteKeySigningKey API operation for Amazon Route 53. // // Deletes a key-signing key (KSK). Before you can delete a KSK, you must deactivate -// it. The KSK must be deactived before you can delete it regardless of whether +// it. The KSK must be deactivated before you can delete it regardless of whether // the hosted zone is enabled for DNSSEC signing. // // Returns awserr.Error for service API and SDK errors. 
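
The IAM change above now documents NoSuchEntity as a returned error code for GetAccessKeyLastUsed; a hedged sketch of distinguishing it with awserr (the access key ID is a placeholder).

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iam"
)

func main() {
	svc := iam.New(session.Must(session.NewSession()))

	out, err := svc.GetAccessKeyLastUsed(&iam.GetAccessKeyLastUsedInput{
		AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"), // placeholder key ID
	})
	if err != nil {
		// NoSuchEntity is now listed as a returned error code for this call.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == iam.ErrCodeNoSuchEntityException {
			log.Fatalf("access key not found: %v", aerr.Message())
		}
		log.Fatalf("GetAccessKeyLastUsed failed: %v", err)
	}
	fmt.Println(out)
}
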
Use runtime type assertions @@ -5318,7 +5321,7 @@ func (c *Route53) ListResourceRecordSetsRequest(input *ListResourceRecordSetsInp // // Lists the resource record sets in a specified hosted zone. // -// ListResourceRecordSets returns up to 100 resource record sets at a time in +// ListResourceRecordSets returns up to 300 resource record sets at a time in // ASCII order, beginning at a position specified by the name and type elements. // // Sort order @@ -6337,6 +6340,8 @@ func (c *Route53) TestDNSAnswerRequest(input *TestDNSAnswerInput) (req *request. // for a specified record name and type. You can optionally specify the IP address // of a DNS resolver, an EDNS0 client subnet IP address, and a subnet mask. // +// This call only supports querying public hosted zones. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -8273,7 +8278,7 @@ type CreateKeySigningKeyInput struct { // The key policy must also include the Amazon Route 53 service in the principal // for your account. Specify the following: // - // * "Service": "api-service.dnssec.route53.aws.internal" + // * "Service": "dnssec.route53.aws.amazonaws.com" // // For more information about working with a customer managed CMK in AWS KMS, // see AWS Key Management Service concepts (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html). diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index 89a0a29afff..6d15bad28f7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -14,6 +14,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/internal/s3shared/arn" "github.com/aws/aws-sdk-go/private/checksum" "github.com/aws/aws-sdk-go/private/protocol" @@ -67,7 +68,7 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req // AbortMultipartUpload API operation for Amazon Simple Storage Service. // -// This operation aborts a multipart upload. After a multipart upload is aborted, +// This action aborts a multipart upload. After a multipart upload is aborted, // no additional parts can be uploaded using that upload ID. The storage consumed // by any previously uploaded parts will be freed. However, if any part uploads // are currently in progress, those part uploads might or might not succeed. @@ -76,10 +77,10 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req // // To verify that all parts have been removed, so you don't get charged for // the part storage, you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// operation and ensure that the parts list is empty. +// action and ensure that the parts list is empty. // -// For information about permissions required to use the multipart upload API, -// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// For information about permissions required to use the multipart upload, see +// Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). 
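
Looking back at the Route 53 change above, ListResourceRecordSets is now documented as returning up to 300 record sets per page, so callers should paginate rather than assume one call returns everything. A hedged sketch using the SDK's existing Pages helper; the hosted zone ID is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	svc := route53.New(session.Must(session.NewSession()))

	input := &route53.ListResourceRecordSetsInput{
		HostedZoneId: aws.String("Z0EXAMPLE"), // placeholder hosted zone ID
	}
	err := svc.ListResourceRecordSetsPages(input,
		func(page *route53.ListResourceRecordSetsOutput, lastPage bool) bool {
			for _, rrset := range page.ResourceRecordSets {
				fmt.Printf("%s %s\n", aws.StringValue(rrset.Name), aws.StringValue(rrset.Type))
			}
			return true // keep paging until the last page
		})
	if err != nil {
		log.Fatalf("listing record sets: %v", err)
	}
}
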
// // The following operations are related to AbortMultipartUpload: // @@ -175,10 +176,10 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) // You first initiate the multipart upload and then upload all parts using the // UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // operation. After successfully uploading all relevant parts of an upload, -// you call this operation to complete the upload. Upon receiving this request, +// you call this action to complete the upload. Upon receiving this request, // Amazon S3 concatenates all the parts in ascending order by part number to // create a new object. In the Complete Multipart Upload request, you must provide -// the parts list. You must ensure that the parts list is complete. This operation +// the parts list. You must ensure that the parts list is complete. This action // concatenates the parts that you provide in the list. For each part in the // list, you must provide the part number and the ETag value, returned after // that part was uploaded. @@ -199,7 +200,7 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) // Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). // // For information about permissions required to use the multipart upload API, -// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). // // CompleteMultipartUpload has the following special errors: // @@ -306,10 +307,10 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // Creates a copy of an object that is already stored in Amazon S3. // // You can store individual objects of up to 5 TB in Amazon S3. You create a -// copy of your object up to 5 GB in size in a single atomic operation using -// this API. However, to copy an object greater than 5 GB, you must use the -// multipart upload Upload Part - Copy API. For more information, see Copy Object -// Using the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). +// copy of your object up to 5 GB in size in a single atomic action using this +// API. However, to copy an object greater than 5 GB, you must use the multipart +// upload Upload Part - Copy API. For more information, see Copy Object Using +// the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). // // All copy requests must be authenticated. Additionally, you must have read // access to the source object and write access to the destination bucket. For @@ -319,7 +320,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // // A copy request might return an error when Amazon S3 receives the copy request // or while Amazon S3 is copying the files. If the error occurs before the copy -// operation starts, you receive a standard Amazon S3 error. If the error occurs +// action starts, you receive a standard Amazon S3 error. If the error occurs // during the copy operation, the error response is embedded in the 200 OK response. // This means that a 200 OK response can contain either a success or an error. 
// Design your application to parse the contents of the response and handle @@ -334,7 +335,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // // The copy request charge is based on the storage class and Region that you // specify for the destination object. For pricing information, see Amazon S3 -// pricing (https://aws.amazon.com/s3/pricing/). +// pricing (http://aws.amazon.com/s3/pricing/). // // Amazon S3 transfer acceleration does not support cross-Region copies. If // you request a cross-Region copy using a transfer acceleration endpoint, you @@ -404,7 +405,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // // If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the // object. For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // Access Control List (ACL)-Specific Request Headers // @@ -418,7 +419,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // // Storage Class Options // -// You can use the CopyObject operation to change the storage class of an object +// You can use the CopyObject action to change the storage class of an object // that is already stored in Amazon S3 using the StorageClass parameter. For // more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) // in the Amazon S3 Service Developer Guide. @@ -459,8 +460,8 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // // Returned Error Codes: // * ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError" -// The source object of the COPY operation is not in the active tier and is -// only stored in Amazon S3 Glacier. +// The source object of the COPY action is not in the active tier and is only +// stored in Amazon S3 Glacier. // // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { @@ -678,10 +679,10 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // CreateMultipartUpload API operation for Amazon Simple Storage Service. // -// This operation initiates a multipart upload and returns an upload ID. This -// upload ID is used to associate all of the parts in the specific multipart -// upload. You specify this upload ID in each of your subsequent upload part -// requests (see UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). +// This action initiates a multipart upload and returns an upload ID. This upload +// ID is used to associate all of the parts in the specific multipart upload. +// You specify this upload ID in each of your subsequent upload part requests +// (see UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). // You also include this upload ID in the final request to either complete or // abort the multipart upload request. // @@ -691,12 +692,12 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // If you have configured a lifecycle rule to abort incomplete multipart uploads, // the upload must complete within the number of days specified in the bucket // lifecycle configuration. Otherwise, the incomplete multipart upload becomes -// eligible for an abort operation and Amazon S3 aborts the multipart upload. 
-// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket +// eligible for an abort action and Amazon S3 aborts the multipart upload. For +// more information, see Aborting Incomplete Multipart Uploads Using a Bucket // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). // // For information about the permissions required to use the multipart upload -// API, see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). // // For request signing, multipart upload is just a series of regular requests. // You initiate a multipart upload, send one or more requests to upload parts, @@ -716,7 +717,7 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // and decrypts it when you access it. You can provide your own encryption key, // or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or // Amazon S3-managed encryption keys. If you choose to provide your own encryption -// key, the request headers you provide in UploadPart (AmazonS3/latest/API/API_UploadPart.html) +// key, the request headers you provide in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) // requests must match the headers you used in the request to initiate the upload // by using CreateMultipartUpload. @@ -989,8 +990,8 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt // To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // For information about the Amazon S3 analytics feature, see Amazon S3 Analytics // – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). @@ -1083,7 +1084,7 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request // permission to others. // // For information about cors, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // Related Resources: // @@ -1164,17 +1165,17 @@ func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) ( // DeleteBucketEncryption API operation for Amazon Simple Storage Service. 
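
The CreateMultipartUpload documentation above describes the initiate / upload parts / complete sequence; a compact, hedged sketch of that flow with the existing S3 APIs. The bucket, key, and the single tiny part are placeholders; real uploads would stream larger parts and more of them.

package main

import (
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	bucket, key := aws.String("example-bucket"), aws.String("example-key") // placeholders

	// 1. Initiate the upload and keep the upload ID.
	create, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket: bucket, Key: key,
	})
	if err != nil {
		log.Fatal(err)
	}

	// 2. Upload one (illustrative) part and record its ETag.
	part, err := svc.UploadPart(&s3.UploadPartInput{
		Bucket: bucket, Key: key,
		UploadId:   create.UploadId,
		PartNumber: aws.Int64(1),
		Body:       strings.NewReader("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// 3. Complete the upload with the full, ordered parts list (number + ETag).
	_, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket: bucket, Key: key,
		UploadId: create.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []*s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
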
// -// This implementation of the DELETE operation removes default encryption from +// This implementation of the DELETE action removes default encryption from // the bucket. For information about the Amazon S3 default encryption feature, // see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. // // Related Resources // @@ -1361,8 +1362,8 @@ func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInvent // To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // For information about the Amazon S3 inventory feature, see Amazon S3 Inventory // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). @@ -1550,8 +1551,8 @@ func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsC // To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). 
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // For information about CloudWatch request metrics for Amazon S3, see Monitoring // Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). @@ -1725,9 +1726,9 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req // DeleteBucketPolicy API operation for Amazon Simple Storage Service. // -// This implementation of the DELETE operation uses the policy subresource to -// delete the policy of a specified bucket. If you are using an identity other -// than the root user of the AWS account that owns the bucket, the calling identity +// This implementation of the DELETE action uses the policy subresource to delete +// the policy of a specified bucket. If you are using an identity other than +// the root user of the AWS account that owns the bucket, the calling identity // must have the DeleteBucketPolicy permissions on the specified bucket and // belong to the bucket owner's account to use this operation. // @@ -1827,8 +1828,8 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) // To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration // action. The bucket owner has these permissions by default and can grant it // to others. For more information about permissions, see Permissions Related -// to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // It can take a while for the deletion of a replication configuration to fully // propagate. @@ -2000,15 +2001,15 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r // DeleteBucketWebsite API operation for Amazon Simple Storage Service. // -// This operation removes the website configuration for a bucket. Amazon S3 -// returns a 200 OK response upon successfully deleting a website configuration -// on the specified bucket. You will get a 200 OK response if the website configuration +// This action removes the website configuration for a bucket. Amazon S3 returns +// a 200 OK response upon successfully deleting a website configuration on the +// specified bucket. You will get a 200 OK response if the website configuration // you are trying to delete does not exist on the bucket. Amazon S3 returns // a 404 response if the bucket specified in the request does not exist. // -// This DELETE operation requires the S3:DeleteBucketWebsite permission. By -// default, only the bucket owner can delete the website configuration attached -// to a bucket. However, bucket owners can grant other users permission to delete +// This DELETE action requires the S3:DeleteBucketWebsite permission. 
By default, +// only the bucket owner can delete the website configuration attached to a +// bucket. However, bucket owners can grant other users permission to delete // the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite // permission. // @@ -2095,7 +2096,8 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request // // Removes the null version (if there is one) of an object and inserts a delete // marker, which becomes the latest version of the object. If there isn't a -// null version, Amazon S3 does not remove any objects. +// null version, Amazon S3 does not remove any objects but will still respond +// that the command was successful. // // To remove a specific version, you must be the bucket owner and you must use // the version Id subresource. Using this subresource permanently deletes the @@ -2110,14 +2112,14 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request // For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html). // To see sample requests that use versioning, see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete). // -// You can delete objects by explicitly calling the DELETE Object API or configure -// its lifecycle (PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)) +// You can delete objects by explicitly calling DELETE Object or configure its +// lifecycle (PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)) // to enable Amazon S3 to remove them for you. If you want to block users or // accounts from removing or deleting objects from your bucket, you must deny // them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration // actions. // -// The following operation is related to DeleteObject: +// The following action is related to DeleteObject: // // * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // @@ -2285,27 +2287,27 @@ func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Reque // DeleteObjects API operation for Amazon Simple Storage Service. // -// This operation enables you to delete multiple objects from a bucket using -// a single HTTP request. If you know the object keys that you want to delete, -// then this operation provides a suitable alternative to sending individual -// delete requests, reducing per-request overhead. +// This action enables you to delete multiple objects from a bucket using a +// single HTTP request. If you know the object keys that you want to delete, +// then this action provides a suitable alternative to sending individual delete +// requests, reducing per-request overhead. // // The request contains a list of up to 1000 keys that you want to delete. In // the XML, you provide the object key names, and optionally, version IDs if // you want to delete a specific version of the object from a versioning-enabled -// bucket. For each key, Amazon S3 performs a delete operation and returns the +// bucket. For each key, Amazon S3 performs a delete action and returns the // result of that delete, success, or failure, in the response. Note that if // the object specified in the request is not found, Amazon S3 returns the result // as deleted. // -// The operation supports two modes for the response: verbose and quiet. 
By -// default, the operation uses verbose mode in which the response includes the -// result of deletion of each key in your request. In quiet mode the response -// includes only keys where the delete operation encountered an error. For a -// successful deletion, the operation does not return any information about -// the delete in the response body. +// The action supports two modes for the response: verbose and quiet. By default, +// the action uses verbose mode in which the response includes the result of +// deletion of each key in your request. In quiet mode the response includes +// only keys where the delete action encountered an error. For a successful +// deletion, the action does not return any information about the delete in +// the response body. // -// When performing this operation on an MFA Delete enabled bucket, that attempts +// When performing this action on an MFA Delete enabled bucket, that attempts // to delete any versioned objects, you must include an MFA token. If you do // not provide one, the entire request will fail, even if there are non-versioned // objects you are trying to delete. If you provide an invalid token, whether @@ -2404,8 +2406,8 @@ func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput) // Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use // this operation, you must have the s3:PutBucketPublicAccessBlock permission. // For more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // The following operations are related to DeletePublicAccessBlock: // @@ -2489,17 +2491,17 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC // GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. // -// This implementation of the GET operation uses the accelerate subresource -// to return the Transfer Acceleration state of a bucket, which is either Enabled +// This implementation of the GET action uses the accelerate subresource to +// return the Transfer Acceleration state of a bucket, which is either Enabled // or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that // enables you to perform faster data transfers to and from Amazon S3. // // To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. 
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. // // You set the Transfer Acceleration state of an existing bucket to Enabled // or Suspended by using the PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) @@ -2511,7 +2513,7 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC // // For more information about transfer acceleration, see Transfer Acceleration // (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // Related Resources // @@ -2589,7 +2591,7 @@ func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request // GetBucketAcl API operation for Amazon Simple Storage Service. // -// This implementation of the GET operation uses the acl subresource to return +// This implementation of the GET action uses the acl subresource to return // the access control list (ACL) of a bucket. To use GET to return the ACL of // the bucket, you must have READ_ACP access to the bucket. If READ_ACP permission // is granted to the anonymous user, you can return the ACL of the bucket without @@ -2671,19 +2673,19 @@ func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsCon // GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. // -// This implementation of the GET operation returns an analytics configuration +// This implementation of the GET action returns an analytics configuration // (identified by the analytics configuration ID) from the bucket. // // To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. // // For information about Amazon S3 analytics feature, see Amazon S3 Analytics // – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // Related Resources // @@ -2852,15 +2854,18 @@ func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *r // GetBucketEncryption API operation for Amazon Simple Storage Service. // -// Returns the default encryption configuration for an Amazon S3 bucket. 
For -// information about the Amazon S3 default encryption feature, see Amazon S3 -// Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). +// Returns the default encryption configuration for an Amazon S3 bucket. If +// the bucket does not have a default encryption configuration, GetBucketEncryption +// returns ServerSideEncryptionConfigurationNotFoundError. +// +// For information about the Amazon S3 default encryption feature, see Amazon +// S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). // // To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // The following operations are related to GetBucketEncryption: // @@ -3045,8 +3050,8 @@ func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryCon // To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration // action. The bucket owner has this permission by default and can grant this // permission to others. For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // For information about the Amazon S3 inventory feature, see Amazon S3 Inventory // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). @@ -3148,8 +3153,8 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req // To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). 
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // GetBucketLifecycle has the following special error: // @@ -3247,8 +3252,8 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon // Accordingly, this section describes the latest API. The response describes // the new filter element that you can use to specify a filter to select a subset // of objects to which the rule applies. If you are using a previous version -// of the lifecycle configuration, it still works. For the earlier API description, -// see GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html). +// of the lifecycle configuration, it still works. For the earlier action, see +// GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html). // // Returns the lifecycle configuration information set on the bucket. For information // about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). @@ -3256,8 +3261,8 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon // To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration // action. The bucket owner has this permission, by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // GetBucketLifecycleConfiguration has the following special error: // @@ -3516,8 +3521,8 @@ func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigu // To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). 
// // For information about CloudWatch request metrics for Amazon S3, see Monitoring // Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). @@ -3689,8 +3694,8 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat // // Returns the notification configuration of a bucket. // -// If notifications are not enabled on the bucket, the operation returns an -// empty NotificationConfiguration element. +// If notifications are not enabled on the bucket, the action returns an empty +// NotificationConfiguration element. // // By default, you must be the bucket owner to read the notification configuration // of a bucket. However, the bucket owner can use a bucket policy to grant permission @@ -3701,7 +3706,7 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat // on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). // For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). // -// The following operation is related to GetBucketNotification: +// The following action is related to GetBucketNotification: // // * PutBucketNotification (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html) // @@ -3879,7 +3884,7 @@ func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.R // For more information about bucket policies, see Using Bucket Policies and // User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). // -// The following operation is related to GetBucketPolicy: +// The following action is related to GetBucketPolicy: // // * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // @@ -4052,11 +4057,11 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req // can return a wrong result. // // For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // -// This operation requires permissions for the s3:GetReplicationConfiguration -// action. For more information about permissions, see Using Bucket Policies -// and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// This action requires permissions for the s3:GetReplicationConfiguration action. +// For more information about permissions, see Using Bucket Policies and User +// Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). // // If you include the Filter element in a replication configuration, you must // also include the DeleteMarkerReplication and Priority elements. The response @@ -4405,7 +4410,7 @@ func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request // For more information about hosting websites, see Hosting Websites on Amazon // S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). // -// This GET operation requires the S3:GetBucketWebsite permission. By default, +// This GET action requires the S3:GetBucketWebsite permission. By default, // only the bucket owner can read the bucket website configuration. 
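
Per the GetBucketEncryption change above, a bucket without a default encryption configuration now documents a ServerSideEncryptionConfigurationNotFoundError. A hedged sketch that matches on the raw error code string, since no generated constant for it appears in this excerpt; the bucket name is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetBucketEncryption(&s3.GetBucketEncryptionInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		// Per the updated documentation, an unconfigured bucket yields this
		// error code rather than an empty result.
		if aerr, ok := err.(awserr.Error); ok &&
			aerr.Code() == "ServerSideEncryptionConfigurationNotFoundError" {
			fmt.Println("bucket has no default encryption configuration")
			return
		}
		log.Fatal(err)
	}
	fmt.Println(out.ServerSideEncryptionConfiguration)
}
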
However, // bucket owners can allow other users to read the website configuration by // writing a bucket policy granting them the S3:GetBucketWebsite permission. @@ -4515,7 +4520,7 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering // Deep Archive tiers, before you can retrieve the object you must first restore // a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). -// Otherwise, this operation returns an InvalidObjectStateError error. For information +// Otherwise, this action returns an InvalidObjectStateError error. For information // about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). // // Encryption request headers, like x-amz-server-side-encryption, should not @@ -4558,8 +4563,8 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // // Versioning // -// By default, the GET operation returns the current version of an object. To -// return a different version, use the versionId subresource. +// By default, the GET action returns the current version of an object. To return +// a different version, use the versionId subresource. // // If the current version of the object is a delete marker, Amazon S3 behaves // as if the object was deleted and includes x-amz-delete-marker: true in the @@ -5026,7 +5031,7 @@ func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request // subresource associated with the object. // // To use this operation, you must have permission to perform the s3:GetObjectTagging -// action. By default, the GET operation returns information about current version +// action. By default, the GET action returns information about current version // of an object. For a versioned bucket, you can have multiple versions of an // object in your bucket. To retrieve tags of any other version, use the versionId // query parameter. You also need permission for the s3:GetObjectVersionTagging @@ -5038,10 +5043,12 @@ func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request // For information about the Amazon S3 object tagging feature, see Object Tagging // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). // -// The following operation is related to GetObjectTagging: +// The following action is related to GetObjectTagging: // // * PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) // +// * DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -5126,7 +5133,7 @@ func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request // // This action is not supported by Amazon S3 on Outposts. // -// The following operation is related to GetObjectTorrent: +// The following action is related to GetObjectTorrent: // // * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // @@ -5300,16 +5307,20 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou // HeadBucket API operation for Amazon Simple Storage Service. 
// -// This operation is useful to determine if a bucket exists and you have permission -// to access it. The operation returns a 200 OK if the bucket exists and you -// have permission to access it. Otherwise, the operation might return responses -// such as 404 Not Found and 403 Forbidden. +// This action is useful to determine if a bucket exists and you have permission +// to access it. The action returns a 200 OK if the bucket exists and you have +// permission to access it. +// +// If the bucket does not exist or you do not have permission to access it, +// the HEAD request returns a generic 404 Not Found or 403 Forbidden code. A +// message body is not included, so you cannot determine the exception beyond +// these error codes. // // To use this operation, you must have permissions to perform the s3:ListBucket // action. The bucket owner has this permission by default and can grant this // permission to others. For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5388,13 +5399,15 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // HeadObject API operation for Amazon Simple Storage Service. // -// The HEAD operation retrieves metadata from an object without returning the -// object itself. This operation is useful if you're only interested in an object's -// metadata. To use HEAD, you must have READ access to the object. +// The HEAD action retrieves metadata from an object without returning the object +// itself. This action is useful if you're only interested in an object's metadata. +// To use HEAD, you must have READ access to the object. // -// A HEAD request has the same options as a GET operation on an object. The -// response is identical to the GET response except that there is no response -// body. +// A HEAD request has the same options as a GET action on an object. The response +// is identical to the GET response except that there is no response body. Because +// of this, if the HEAD request generates an error, it returns a generic 404 +// Not Found or 403 Forbidden code. It is not possible to retrieve the exact +// exception beyond these error codes. // // If you encrypt an object by using server-side encryption with customer-provided // encryption keys (SSE-C) when you store the object in Amazon S3, then when @@ -5409,11 +5422,14 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). 
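Reviewer aside, not part of the vendored diff: the HeadBucket/HeadObject doc changes above stress that HEAD failures come back only as generic 404/403 status codes with no response body. A minimal sketch of distinguishing those cases with this SDK via awserr.RequestFailure; bucket and key are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
	})
	if err != nil {
		// HEAD responses carry no body, so only the HTTP status code is available.
		if reqErr, ok := err.(awserr.RequestFailure); ok {
			switch reqErr.StatusCode() {
			case 404:
				fmt.Println("object (or bucket) not found")
			case 403:
				fmt.Println("access denied")
			default:
				fmt.Println("request failed:", reqErr.Code())
			}
			return
		}
		log.Fatal(err)
	}
	fmt.Println("object exists and is accessible")
}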
// -// Encryption request headers, like x-amz-server-side-encryption, should not -// be sent for GET requests if your object uses server-side encryption with -// CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed -// encryption keys (SSE-S3). If your object does use these types of keys, you’ll -// get an HTTP 400 BadRequest error. +// * Encryption request headers, like x-amz-server-side-encryption, should +// not be sent for GET requests if your object uses server-side encryption +// with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon +// S3–managed encryption keys (SSE-S3). If your object does use these types +// of keys, you’ll get an HTTP 400 BadRequest error. +// +// * The last modified property in this case is the creation date of the +// object. // // Request headers are limited to 8 KB in size. For more information, see Common // Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). @@ -5445,7 +5461,7 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // * If you don’t have the s3:ListBucket permission, Amazon S3 returns // an HTTP status code 403 ("access denied") error. // -// The following operation is related to HeadObject: +// The following action is related to HeadObject: // // * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // @@ -5527,19 +5543,19 @@ func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalytics // Lists the analytics configurations for the bucket. You can have up to 1,000 // analytics configurations per bucket. // -// This operation supports list pagination and does not return more than 100 -// configurations at a time. You should always check the IsTruncated element -// in the response. If there are no more configurations to list, IsTruncated -// is set to false. If there are more configurations to list, IsTruncated is -// set to true, and there will be a value in NextContinuationToken. You use -// the NextContinuationToken value to continue the pagination of the list by -// passing the value in continuation-token in the request to GET the next page. +// This action supports list pagination and does not return more than 100 configurations +// at a time. You should always check the IsTruncated element in the response. +// If there are no more configurations to list, IsTruncated is set to false. +// If there are more configurations to list, IsTruncated is set to true, and +// there will be a value in NextContinuationToken. You use the NextContinuationToken +// value to continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. // // To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). 
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // For information about Amazon S3 analytics feature, see Amazon S3 Analytics // – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). @@ -5726,19 +5742,19 @@ func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventory // Returns a list of inventory configurations for the bucket. You can have up // to 1,000 analytics configurations per bucket. // -// This operation supports list pagination and does not return more than 100 -// configurations at a time. Always check the IsTruncated element in the response. -// If there are no more configurations to list, IsTruncated is set to false. -// If there are more configurations to list, IsTruncated is set to true, and -// there is a value in NextContinuationToken. You use the NextContinuationToken -// value to continue the pagination of the list by passing the value in continuation-token +// This action supports list pagination and does not return more than 100 configurations +// at a time. Always check the IsTruncated element in the response. If there +// are no more configurations to list, IsTruncated is set to false. If there +// are more configurations to list, IsTruncated is set to true, and there is +// a value in NextContinuationToken. You use the NextContinuationToken value +// to continue the pagination of the list by passing the value in continuation-token // in the request to GET the next page. // // To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // For information about the Amazon S3 inventory feature, see Amazon S3 Inventory // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) @@ -5827,19 +5843,19 @@ func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConf // are only for the request metrics of the bucket and do not provide information // on daily storage metrics. You can have up to 1,000 configurations per bucket. // -// This operation supports list pagination and does not return more than 100 -// configurations at a time. Always check the IsTruncated element in the response. -// If there are no more configurations to list, IsTruncated is set to false. -// If there are more configurations to list, IsTruncated is set to true, and -// there is a value in NextContinuationToken. 
You use the NextContinuationToken -// value to continue the pagination of the list by passing the value in continuation-token +// This action supports list pagination and does not return more than 100 configurations +// at a time. Always check the IsTruncated element in the response. If there +// are no more configurations to list, IsTruncated is set to false. If there +// are more configurations to list, IsTruncated is set to true, and there is +// a value in NextContinuationToken. You use the NextContinuationToken value +// to continue the pagination of the list by passing the value in continuation-token // in the request to GET the next page. // // To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // For more information about metrics configurations and CloudWatch request // metrics, see Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). @@ -6004,11 +6020,11 @@ func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req // ListMultipartUploads API operation for Amazon Simple Storage Service. // -// This operation lists in-progress multipart uploads. An in-progress multipart +// This action lists in-progress multipart uploads. An in-progress multipart // upload is a multipart upload that has been initiated using the Initiate Multipart // Upload request, but has not yet been completed or aborted. // -// This operation returns at most 1,000 multipart uploads in the response. 1,000 +// This action returns at most 1,000 multipart uploads in the response. 1,000 // multipart uploads is the maximum number of uploads a response can include, // which is also the default value. You can further limit the number of uploads // in a response by specifying the max-uploads parameter in the response. If @@ -6025,7 +6041,7 @@ func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req // Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). // // For information on permissions required to use the multipart upload API, -// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). // // The following operations are related to ListMultipartUploads: // @@ -6173,6 +6189,9 @@ func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *req // use request parameters as selection criteria to return metadata about a subset // of all the object versions. 
// +// To use this operation, you must have permissions to perform the s3:ListBucketVersions +// action. Be aware of the name difference. +// // A 200 OK response can contain valid or invalid XML. Make sure to design your // application to parse the contents of the response and handle it appropriately. // @@ -6326,8 +6345,8 @@ func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, // to design your application to parse the contents of the response and handle // it appropriately. // -// This API has been revised. We recommend that you use the newer version, ListObjectsV2 -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html), +// This action has been revised. We recommend that you use the newer version, +// ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html), // when developing applications. For backward compatibility, Amazon S3 continues // to support ListObjects. // @@ -6482,18 +6501,19 @@ func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Reque // the request parameters as selection criteria to return a subset of the objects // in a bucket. A 200 OK response can contain valid or invalid XML. Make sure // to design your application to parse the contents of the response and handle -// it appropriately. +// it appropriately. Objects are returned sorted in an ascending order of the +// respective key names in the list. // // To use this operation, you must have READ access to the bucket. // -// To use this operation in an AWS Identity and Access Management (IAM) policy, +// To use this action in an AWS Identity and Access Management (IAM) policy, // you must have permissions to perform the s3:ListBucket action. The bucket // owner has this permission by default and can grant this permission to others. // For more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // -// This section describes the latest revision of the API. We recommend that +// This section describes the latest revision of this action. We recommend that // you use this revised API for application development. For backward compatibility, // Amazon S3 continues to support the prior version of this API, ListObjects // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). @@ -6658,7 +6678,7 @@ func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, outp // Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). // // For information on permissions required to use the multipart upload API, -// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). 
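Reviewer aside, not part of the vendored diff: the ListObjectsV2 doc text above now calls out ascending key order and the s3:ListBucket requirement. A small hedged sketch of paginating the operation with the SDK's built-in pager; the bucket name is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// ListObjectsV2 returns at most 1,000 keys per page; the pager follows
	// NextContinuationToken until IsTruncated is false.
	err := svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{
		Bucket: aws.String("example-bucket"), // placeholder
	}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, obj := range page.Contents {
			fmt.Println(aws.StringValue(obj.Key))
		}
		return true // keep paginating
	})
	if err != nil {
		log.Fatal(err)
	}
}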
// // The following operations are related to ListParts: // @@ -6804,8 +6824,8 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC // To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // The Transfer Acceleration state of a bucket can be set to one of the following // two values: @@ -6815,7 +6835,7 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC // * Suspended – Disables accelerated data transfers to the bucket. // // The GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) -// operation returns the transfer acceleration state of a bucket. +// action returns the transfer acceleration state of a bucket. // // After setting the Transfer Acceleration state of a bucket to Enabled, it // might take up to thirty minutes before the data transfer rates to the bucket @@ -7092,8 +7112,8 @@ func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsCon // To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // Special Errors // @@ -7227,7 +7247,7 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque // // For more information about CORS, go to Enabling Cross-Origin Resource Sharing // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon -// Simple Storage Service Developer Guide. +// S3 User Guide. // // Related Resources // @@ -7314,7 +7334,7 @@ func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *r // PutBucketEncryption API operation for Amazon Simple Storage Service. 
// -// This operation uses the encryption subresource to configure default encryption +// This action uses the encryption subresource to configure default encryption // and Amazon S3 Bucket Key for an existing bucket. // // Default encryption for a bucket can use server-side encryption with Amazon @@ -7322,19 +7342,19 @@ func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *r // specify default encryption using SSE-KMS, you can also configure Amazon S3 // Bucket Key. For information about default encryption, see Amazon S3 default // bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) -// in the Amazon Simple Storage Service Developer Guide. For more information -// about S3 Bucket Keys, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. For more information about S3 Bucket Keys, see +// Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) +// in the Amazon S3 User Guide. // -// This operation requires AWS Signature Version 4. For more information, see -// Authenticating Requests (AWS Signature Version 4) (sig-v4-authenticating-requests.html). +// This action requires AWS Signature Version 4. For more information, see Authenticating +// Requests (AWS Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). // // To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. // // Related Resources // @@ -7415,7 +7435,8 @@ func (c *S3) PutBucketIntelligentTieringConfigurationRequest(input *PutBucketInt // PutBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. // -// Puts a S3 Intelligent-Tiering configuration to the specified bucket. +// Puts a S3 Intelligent-Tiering configuration to the specified bucket. You +// can have up to 1,000 S3 Intelligent-Tiering configurations per bucket. 
// // The S3 Intelligent-Tiering storage class is designed to optimize storage // costs by automatically moving data to the most cost-effective storage access @@ -7442,6 +7463,22 @@ func (c *S3) PutBucketIntelligentTieringConfigurationRequest(input *PutBucketInt // // * ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) // +// You only need S3 Intelligent-Tiering enabled on a bucket if you want to automatically +// move objects stored in the S3 Intelligent-Tiering storage class to the Archive +// Access or Deep Archive Access tier. +// +// Special Errors +// +// * HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument +// +// * HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are +// attempting to create a new configuration but have already reached the +// 1,000-configuration limit. +// +// * HTTP 403 Forbidden Error Code: AccessDenied Cause: You are not the owner +// of the specified bucket, or you do not have the s3:PutIntelligentTieringConfiguration +// bucket permission to set the configuration on the bucket. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -7515,9 +7552,9 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon // PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service. // -// This implementation of the PUT operation adds an inventory configuration -// (identified by the inventory ID) to the bucket. You can have up to 1,000 -// inventory configurations per bucket. +// This implementation of the PUT action adds an inventory configuration (identified +// by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations +// per bucket. // // Amazon S3 inventory generates inventories of the objects in the bucket on // a daily or weekly basis, and the results are published to a flat file. The @@ -7530,7 +7567,7 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon // the inventory daily or weekly. You can also configure what object metadata // to include and whether to inventory all object versions or only current versions. // For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // You must create a bucket policy on the destination bucket to grant permissions // to Amazon S3 to write objects to the bucket in the defined location. For @@ -7540,9 +7577,9 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon // To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration // action. The bucket owner has this permission by default and can grant this // permission to others. For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. 
+// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. // // Special Errors // @@ -7654,7 +7691,7 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req // Creates a new lifecycle configuration for the bucket or replaces an existing // lifecycle configuration. For information about lifecycle configuration, see // Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // By default, all Amazon S3 resources, including buckets, objects, and related // subresources (for example, lifecycle configuration and website configuration) @@ -7675,8 +7712,8 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req // * s3:PutLifecycleConfiguration // // For more information about permissions, see Managing Access Permissions to -// your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. +// your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. // // For more examples of transitioning objects to storage classes such as STANDARD_IA // or ONEZONE_IA, see Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#lifecycle-configuration-examples). @@ -7692,9 +7729,9 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req // * By default, a resource owner—in this case, a bucket owner, which is // the AWS account that created the bucket—can perform any of the operations. // A resource owner can also grant others permission to perform the operation. -// For more information, see the following topics in the Amazon Simple Storage -// Service Developer Guide: Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// For more information, see the following topics in the Amazon S3 User Guide: +// Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7779,7 +7816,7 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon // // Creates a new lifecycle configuration for the bucket or replaces an existing // lifecycle configuration. For information about lifecycle configuration, see -// Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). 
// // Bucket lifecycle configuration now supports specifying a lifecycle rule using // an object key name prefix, one or more object tags, or a combination of both. @@ -7831,7 +7868,7 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon // * s3:PutLifecycleConfiguration // // For more information about permissions, see Managing Access Permissions to -// Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // The following are related to PutBucketLifecycleConfiguration: // @@ -8048,8 +8085,8 @@ func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigu // To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // For information about CloudWatch request metrics for Amazon S3, see Monitoring // Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). @@ -8245,8 +8282,8 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat // // // -// This operation replaces the existing notification configuration with the -// configuration you include in the request body. +// This action replaces the existing notification configuration with the configuration +// you include in the request body. // // After Amazon S3 receives this request, it first verifies that any Amazon // Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon @@ -8266,8 +8303,8 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat // The PUT notification is an atomic operation. For example, suppose your notification // configuration includes SNS topic, SQS queue, and Lambda function configurations. // When you send a PUT request with this configuration, Amazon S3 sends test -// messages to your SNS topic. If the message fails, the entire PUT operation -// will fail, and Amazon S3 will not add the configuration to your bucket. +// messages to your SNS topic. If the message fails, the entire PUT action will +// fail, and Amazon S3 will not add the configuration to your bucket. // // Responses // @@ -8276,7 +8313,7 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat // will also include the x-amz-sns-test-message-id header containing the message // ID of the test notification sent to the topic. 
// -// The following operation is related to PutBucketNotificationConfiguration: +// The following action is related to PutBucketNotificationConfiguration: // // * GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) // @@ -8552,8 +8589,8 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req // information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) // in the Amazon S3 Developer Guide. // -// To perform this operation, the user or role performing the operation must -// have the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) +// To perform this operation, the user or role performing the action must have +// the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) // permission. // // Specify the replication configuration in the request body. In the replication @@ -8583,7 +8620,7 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req // bucket, can perform this operation. The resource owner can also grant others // permissions to perform the operation. For more information about permissions, // see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // Handling Replication of Encrypted Objects // @@ -8786,8 +8823,8 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request // To use this operation, you must have permissions to perform the s3:PutBucketTagging // action. The bucket owner has this permission by default and can grant this // permission to others. For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // PutBucketTagging has the following special errors: // @@ -8801,7 +8838,7 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request // match the schema. // // * Error code: OperationAbortedError Description: A conflicting conditional -// operation is currently in progress against this resource. Please try again. +// action is currently in progress against this resource. Please try again. // // * Error code: InternalError Description: The service was unable to apply // the provided tag to the bucket. @@ -9008,7 +9045,7 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request // document and any redirect rules. For more information, see Hosting Websites // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). 
// -// This PUT operation requires the S3:PutBucketWebsite permission. By default, +// This PUT action requires the S3:PutBucketWebsite permission. By default, // only the bucket owner can configure the website attached to a bucket; however, // bucket owners can allow other users to set the website configuration by writing // a bucket policy that grants them the S3:PutBucketWebsite permission. @@ -9067,7 +9104,7 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request // Amazon S3 has a limitation of 50 routing rules per website configuration. // If you require more than 50 routing rules, you can use object redirect. For // more information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9161,7 +9198,7 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp // The Content-MD5 header is required for any request to upload an object with // a retention period configured using Amazon S3 Object Lock. For more information // about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // Server-side Encryption // @@ -9174,7 +9211,7 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp // If you request server-side encryption using AWS Key Management Service (SSE-KMS), // you can enable an S3 Bucket Key at the object-level. For more information, // see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // Access Control List (ACL)-Specific Request Headers // @@ -9293,7 +9330,7 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request // for a new or existing object in an S3 bucket. You must have WRITE_ACP permission // to set the ACL of an object. For more information, see What permissions can // I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // This action is not supported by Amazon S3 on Outposts. // @@ -9457,14 +9494,11 @@ func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *req // PutObjectLegalHold API operation for Amazon Simple Storage Service. // -// Applies a Legal Hold configuration to the specified object. +// Applies a Legal Hold configuration to the specified object. For more information, +// see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // // This action is not supported by Amazon S3 on Outposts. // -// Related Resources -// -// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
@@ -9543,14 +9577,16 @@ func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfiguration // // Places an Object Lock configuration on the specified bucket. The rule specified // in the Object Lock configuration will be applied by default to every new -// object placed in the specified bucket. +// object placed in the specified bucket. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // -// DefaultRetention requires either Days or Years. You can't specify both at -// the same time. +// * The DefaultRetention settings require both a mode and a period. // -// Related Resources +// * The DefaultRetention period can be either Days or Years but you must +// select one. You cannot specify Days and Years at the same time. // -// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// * You can only enable Object Lock for new buckets. If you want to turn +// on Object Lock for an existing bucket, contact AWS Support. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9628,14 +9664,11 @@ func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *req // PutObjectRetention API operation for Amazon Simple Storage Service. // -// Places an Object Retention configuration on an object. +// Places an Object Retention configuration on an object. For more information, +// see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // // This action is not supported by Amazon S3 on Outposts. // -// Related Resources -// -// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -9741,7 +9774,7 @@ func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request // // * Code: MalformedXMLError Cause: The XML provided does not match the schema. // -// * Code: OperationAbortedError Cause: A conflicting conditional operation +// * Code: OperationAbortedError Cause: A conflicting conditional action // is currently in progress against this resource. Please try again. // // * Code: InternalError Cause: The service was unable to apply the provided @@ -9751,6 +9784,8 @@ func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request // // * GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) // +// * DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -9938,9 +9973,9 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // To use this operation, you must have permissions to perform the s3:RestoreObject // action. The bucket owner has this permission by default and can grant this // permission to others. 
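Reviewer aside, not part of the vendored diff: the reworded PutObjectLockConfiguration notes above (a DefaultRetention needs a mode plus a period, Days or Years but never both, and Object Lock can only be enabled on buckets created with it) translate to roughly the following illustrative sketch; the bucket name and retention values are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
		Bucket: aws.String("example-bucket"), // must have been created with Object Lock enabled
		ObjectLockConfiguration: &s3.ObjectLockConfiguration{
			ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
			Rule: &s3.ObjectLockRule{
				DefaultRetention: &s3.DefaultRetention{
					Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
					Days: aws.Int64(30), // specify Days or Years, never both
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}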
For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. // // Querying Archives with Select Requests // @@ -9950,7 +9985,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // queries and custom analytics on your archived data without having to restore // your data to a hotter Amazon S3 tier. For an overview about select requests, // see Querying Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // When making a select request, do the following: // @@ -9961,13 +9996,12 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // the storage class and encryption for the output objects stored in the // bucket. For more information about output, see Querying Archived Objects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) -// in the Amazon Simple Storage Service Developer Guide. For more information -// about the S3 structure in the request body, see the following: PutObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) Managing -// Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) -// in the Amazon Simple Storage Service Developer Guide Protecting Data Using -// Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) -// in the Amazon Simple Storage Service Developer Guide +// in the Amazon S3 User Guide. For more information about the S3 structure +// in the request body, see the following: PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// Managing Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) +// in the Amazon S3 User Guide Protecting Data Using Server-Side Encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// in the Amazon S3 User Guide // // * Define the SQL expression for the SELECT type of restoration for your // query in the request body's SelectParameters structure. You can use expressions @@ -9983,7 +10017,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // // For more information about using SQL with S3 Glacier Select restore, see // SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. 
// // When making a select request, you can also do the following: // @@ -10054,19 +10088,19 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // // For more information about archive retrieval options and provisioned capacity // for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // You can use Amazon S3 restore speed upgrade to change the restore speed to // a faster speed while it is in progress. For more information, see Upgrading // the speed of an in-progress restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // To get the status of object restoration, you can send a HEAD request. Operations // return the x-amz-restore header, which provides information about the restoration // status, in the response. You can use Amazon S3 event notifications to notify // you when a restore is initiated or completed. For more information, see Configuring // Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // After restoring an archived object, you can update the restoration period // by reissuing the request with a new period. Amazon S3 updates the restoration @@ -10081,11 +10115,11 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // the object in 3 days. For more information about lifecycle configuration, // see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) // and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// in Amazon Simple Storage Service Developer Guide. +// in Amazon S3 User Guide. // // Responses // -// A successful operation returns either the 200 OK or 202 Accepted status code. +// A successful action returns either the 200 OK or 202 Accepted status code. // // * If the object is not previously restored, then Amazon S3 returns 202 // Accepted in the response. @@ -10112,7 +10146,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // * GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) // // * SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) -// in the Amazon Simple Storage Service Developer Guide +// in the Amazon S3 User Guide // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -10123,7 +10157,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // // Returned Error Codes: // * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" -// This operation is not allowed against this storage tier. +// This action is not allowed against this storage tier. 
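Reviewer aside, not part of the vendored diff: for the RestoreObject doc link updates above, a minimal sketch of initiating a restore of an archived object with a retrieval tier; the 200/202 distinction the comment mentions surfaces only in the HTTP status, and all names here are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Restore an archived object for two days using the Standard retrieval tier.
	_, err := svc.RestoreObject(&s3.RestoreObjectInput{
		Bucket: aws.String("example-bucket"),  // placeholder
		Key:    aws.String("archived-object"), // placeholder
		RestoreRequest: &s3.RestoreRequest{
			Days: aws.Int64(2),
			GlacierJobParameters: &s3.GlacierJobParameters{
				Tier: aws.String(s3.TierStandard),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}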
// // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { @@ -10200,7 +10234,7 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // SelectObjectContent API operation for Amazon Simple Storage Service. // -// This operation filters the contents of an Amazon S3 object based on a simple +// This action filters the contents of an Amazon S3 object based on a simple // structured query language (SQL) statement. In the request, along with the // SQL expression, you must also specify a data serialization format (JSON, // CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse @@ -10212,18 +10246,18 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // // For more information about Amazon S3 Select, see Selecting Content from Objects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // For more information about using SQL with Amazon S3 Select, see SQL Reference // for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // Permissions // // You must have s3:GetObject permission for this operation. Amazon S3 Select // does not support anonymous access. For more information about permissions, // see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // Object Data Formats // @@ -10246,13 +10280,13 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // you must use the headers that are documented in the GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) -// in the Amazon Simple Storage Service Developer Guide. For objects that -// are encrypted with Amazon S3 managed encryption keys (SSE-S3) and customer -// master keys (CMKs) stored in AWS Key Management Service (SSE-KMS), server-side -// encryption is handled transparently, so you don't need to specify anything. -// For more information about server-side encryption, including SSE-S3 and -// SSE-KMS, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. For objects that are encrypted with Amazon +// S3 managed encryption keys (SSE-S3) and customer master keys (CMKs) stored +// in AWS Key Management Service (SSE-KMS), server-side encryption is handled +// transparently, so you don't need to specify anything. For more information +// about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting +// Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// in the Amazon S3 User Guide. 
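Reviewer aside, not part of the vendored diff: a hedged sketch of driving SelectObjectContent and draining its event stream, to go with the doc rewording in the hunks above; the CSV object, bucket, and query are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	resp, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
		Bucket:         aws.String("example-bucket"), // placeholder
		Key:            aws.String("data.csv"),       // placeholder
		Expression:     aws.String("SELECT * FROM S3Object s LIMIT 10"),
		ExpressionType: aws.String(s3.ExpressionTypeSql),
		InputSerialization: &s3.InputSerialization{
			CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)},
		},
		OutputSerialization: &s3.OutputSerialization{
			CSV: &s3.CSVOutput{},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer resp.EventStream.Close()

	// Query results arrive as RecordsEvent messages on the event stream.
	for event := range resp.EventStream.Events() {
		if records, ok := event.(*s3.RecordsEvent); ok {
			fmt.Print(string(records.Payload))
		}
	}
	if err := resp.EventStream.Err(); err != nil {
		log.Fatal(err)
	}
}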
// // Working with the Response Body // @@ -10263,8 +10297,8 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // // GetObject Support // -// The SelectObjectContent operation does not support the following GetObject -// functionality. For more information, see GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). +// The SelectObjectContent action does not support the following GetObject functionality. +// For more information, see GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). // // * Range: Although you can specify a scan range for an Amazon S3 Select // request (see SelectObjectContentRequest - ScanRange (https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange) @@ -10274,7 +10308,7 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // * GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You cannot // specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. // For more information, about storage classes see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // Special Errors // @@ -10567,11 +10601,11 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou // // For more information on multipart uploads, go to Multipart Upload Overview // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the -// Amazon Simple Storage Service Developer Guide . +// Amazon S3 User Guide . // // For information on the permissions required to use the multipart upload API, -// go to Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon Simple Storage Service Developer Guide. +// go to Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. // // You can optionally request server-side encryption where Amazon S3 encrypts // your data as it writes it to disks in its data centers and decrypts it for @@ -10581,7 +10615,7 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou // match the headers you used in the request to initiate the upload by using // CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). // For more information, go to Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // Server-side encryption is supported by the S3 Multipart Upload actions. Unless // you are using a customer-provided encryption key, you don't need to specify @@ -10697,10 +10731,10 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req // // The minimum allowable part size for a multipart upload is 5 MB. For more // information about multipart upload limits, go to Quick Facts (https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. 
// // Instead of using an existing object as part data, you might use the UploadPart -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) operation +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) action // and provide data in your request. // // You must initiate a multipart upload before you can upload any part. In response @@ -10711,15 +10745,15 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req // // * For conceptual information about multipart uploads, see Uploading Objects // Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // * For information about permissions required to use the multipart upload -// API, see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon Simple Storage Service Developer Guide. +// API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. // -// * For information about copying objects using a single atomic operation -// vs. the multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) -// in the Amazon Simple Storage Service Developer Guide. +// * For information about copying objects using a single atomic action vs. +// the multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) +// in the Amazon S3 User Guide. // // * For information about using server-side encryption with customer-provided // encryption keys with the UploadPartCopy operation, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) @@ -10808,11 +10842,128 @@ func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInp return out, req.Send() } +const opWriteGetObjectResponse = "WriteGetObjectResponse" + +// WriteGetObjectResponseRequest generates a "aws/request.Request" representing the +// client's request for the WriteGetObjectResponse operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See WriteGetObjectResponse for more information on using the WriteGetObjectResponse +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the WriteGetObjectResponseRequest method. 
+// req, resp := client.WriteGetObjectResponseRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WriteGetObjectResponse +func (c *S3) WriteGetObjectResponseRequest(input *WriteGetObjectResponseInput) (req *request.Request, output *WriteGetObjectResponseOutput) { + op := &request.Operation{ + Name: opWriteGetObjectResponse, + HTTPMethod: "POST", + HTTPPath: "/WriteGetObjectResponse", + } + + if input == nil { + input = &WriteGetObjectResponseInput{} + } + + output = &WriteGetObjectResponseOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Sign.Remove(v4.SignRequestHandler) + handler := v4.BuildNamedHandler("v4.CustomSignerHandler", v4.WithUnsignedPayload) + req.Handlers.Sign.PushFrontNamed(handler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{RequestRoute}.", input.hostLabels)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// WriteGetObjectResponse API operation for Amazon Simple Storage Service. +// +// Passes transformed objects to a GetObject operation when using Object Lambda +// Access Points. For information about Object Lambda Access Points, see Transforming +// objects with Object Lambda Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html) +// in the Amazon S3 User Guide. +// +// This operation supports metadata that can be returned by GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html), +// in addition to RequestRoute, RequestToken, StatusCode, ErrorCode, and ErrorMessage. +// The GetObject response metadata is supported so that the WriteGetObjectResponse +// caller, typically an AWS Lambda function, can provide the same metadata when +// it internally invokes GetObject. When WriteGetObjectResponse is called by +// a customer-owned Lambda function, the metadata returned to the end user GetObject +// call might differ from what Amazon S3 would normally return. +// +// AWS provides some prebuilt Lambda functions that you can use with S3 Object +// Lambda to detect and redact personally identifiable information (PII) and +// decompress S3 objects. These Lambda functions are available in the AWS Serverless +// Application Repository, and can be selected through the AWS Management Console +// when you create your Object Lambda Access Point. +// +// Example 1: PII Access Control - This Lambda function uses Amazon Comprehend, +// a natural language processing (NLP) service using machine learning to find +// insights and relationships in text. It automatically detects personally identifiable +// information (PII) such as names, addresses, dates, credit card numbers, and +// social security numbers from documents in your Amazon S3 bucket. +// +// Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a +// natural language processing (NLP) service using machine learning to find +// insights and relationships in text. It automatically redacts personally identifiable +// information (PII) such as names, addresses, dates, credit card numbers, and +// social security numbers from documents in your Amazon S3 bucket. 
+// +// Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression, +// is equipped to decompress objects stored in S3 in one of six compressed file +// formats including bzip2, gzip, snappy, zlib, zstandard and ZIP. +// +// For information on how to view and use these functions, see Using AWS built +// Lambda functions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html) +// in the Amazon S3 User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation WriteGetObjectResponse for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WriteGetObjectResponse +func (c *S3) WriteGetObjectResponse(input *WriteGetObjectResponseInput) (*WriteGetObjectResponseOutput, error) { + req, out := c.WriteGetObjectResponseRequest(input) + return out, req.Send() +} + +// WriteGetObjectResponseWithContext is the same as WriteGetObjectResponse with the addition of +// the ability to pass a context and additional request options. +// +// See WriteGetObjectResponse for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WriteGetObjectResponseWithContext(ctx aws.Context, input *WriteGetObjectResponseInput, opts ...request.Option) (*WriteGetObjectResponseOutput, error) { + req, out := c.WriteGetObjectResponseRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + // Specifies the days since the initiation of an incomplete multipart upload // that Amazon S3 will wait before permanently removing all parts of the upload. // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. type AbortIncompleteMultipartUpload struct { _ struct{} `type:"structure"` @@ -10842,25 +10993,25 @@ type AbortMultipartUploadInput struct { // The bucket name to which the upload was taking place. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. 
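// An illustrative sketch of the caller side of the WriteGetObjectResponse
// operation introduced above, not from the vendored source: it assumes the
// aws, s3, and strings packages are imported and svc is an existing *s3.S3
// client. outputRoute and outputToken stand in for the values delivered to the
// Lambda function in the Object Lambda event, and transformed is a placeholder
// for the transformed object body.
//
//    func handleTransform(svc *s3.S3, outputRoute, outputToken, transformed string) error {
//        _, err := svc.WriteGetObjectResponse(&s3.WriteGetObjectResponseInput{
//            RequestRoute: aws.String(outputRoute),
//            RequestToken: aws.String(outputToken),
//            StatusCode:   aws.Int64(200),
//            Body:         strings.NewReader(transformed),
//        })
//        return err
//    }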
For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -11008,7 +11159,7 @@ func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipart // Configures the transfer acceleration state for an Amazon S3 bucket. For more // information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. type AccelerateConfiguration struct { _ struct{} `type:"structure"` @@ -11486,7 +11637,7 @@ func (s *Bucket) SetName(v string) *Bucket { // Specifies the lifecycle configuration for objects in an Amazon S3 bucket. // For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. type BucketLifecycleConfiguration struct { _ struct{} `type:"structure"` @@ -11580,7 +11731,7 @@ func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggin // Describes the cross-origin access configuration for objects in an Amazon // S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon -// Simple Storage Service Developer Guide. +// S3 User Guide. type CORSConfiguration struct { _ struct{} `type:"structure"` @@ -11656,6 +11807,9 @@ type CORSRule struct { // object). ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"` + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + // The time in seconds that your browser is to cache the preflight response // for the specified resource. MaxAgeSeconds *int64 `type:"integer"` @@ -11711,6 +11865,12 @@ func (s *CORSRule) SetExposeHeaders(v []*string) *CORSRule { return s } +// SetID sets the ID field's value. 
+func (s *CORSRule) SetID(v string) *CORSRule { + s.ID = &v + return s +} + // SetMaxAgeSeconds sets the MaxAgeSeconds field's value. func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule { s.MaxAgeSeconds = &v @@ -11991,7 +12151,7 @@ type CompleteMultipartUploadInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -12127,20 +12287,20 @@ type CompleteMultipartUploadOutput struct { // The name of the bucket that contains the newly created object. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. Bucket *string `type:"string"` // Indicates whether the multipart upload uses an S3 Bucket Key for server-side @@ -12341,6 +12501,10 @@ type Condition struct { // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals // is not specified. If both conditions are specified, both must be true for // the redirect to be applied. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). 
KeyPrefixEquals *string `type:"string"` } @@ -12409,20 +12573,20 @@ type CopyObjectInput struct { // The name of the destination bucket. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12432,8 +12596,8 @@ type CopyObjectInput struct { // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with // SSE-KMS. // - // Specifying this header with a COPY operation doesn’t affect bucket-level - // settings for S3 Bucket Key. + // Specifying this header with a COPY action doesn’t affect bucket-level settings + // for S3 Bucket Key. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // Specifies caching behavior along the request/reply chain. @@ -12455,7 +12619,7 @@ type CopyObjectInput struct { // Specifies the source object for the copy operation. You specify the value // in one of two formats, depending on whether you want to access the source - // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-points.html): + // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html): // // * For objects not accessed through an access point, specify the name of // the source bucket and the key of the source object, separated by a slash @@ -12513,12 +12677,12 @@ type CopyObjectInput struct { // encryption key was transmitted without error. 
CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` - // The account id of the expected destination bucket owner. If the destination + // The account ID of the expected destination bucket owner. If the destination // bucket is owned by a different account, the request will fail with an HTTP // 403 (Access Denied) error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // The account id of the expected source bucket owner. If the source bucket + // The account ID of the expected source bucket owner. If the source bucket // is owned by a different account, the request will fail with an HTTP 403 (Access // Denied) error. ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"` @@ -13083,10 +13247,10 @@ type CopyObjectResult struct { // Returns the ETag of the new object. The ETag reflects only changes to the // contents of an object, not its metadata. The source and destination ETag - // is identical for a successfully copied object. + // is identical for a successfully copied non-multipart object. ETag *string `type:"string"` - // Returns the date that the object was last modified. + // Creation date of the object. LastModified *time.Time `type:"timestamp"` } @@ -13326,20 +13490,20 @@ type CreateMultipartUploadInput struct { // The name of the bucket to which to initiate the upload // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. 
// // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -13349,7 +13513,7 @@ type CreateMultipartUploadInput struct { // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with // SSE-KMS. // - // Specifying this header with an object operation doesn’t affect bucket-level + // Specifying this header with an object action doesn’t affect bucket-level // settings for S3 Bucket Key. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` @@ -13370,7 +13534,7 @@ type CreateMultipartUploadInput struct { // A standard MIME type describing the format of the object data. ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -13447,7 +13611,7 @@ type CreateMultipartUploadInput struct { // object encryption. All GET and PUT requests for an object protected by AWS // KMS will fail if not made via SSL or using SigV4. For information about configuring // using any of the officially supported AWS SDKs and AWS CLI, see Specifying - // the Signature Version in Request Authentication (https://docs.aws.amazon.com/http:/docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) // in the Amazon S3 Developer Guide. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` @@ -13740,20 +13904,20 @@ type CreateMultipartUploadOutput struct { // The name of the bucket to which the multipart upload was initiated. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. Bucket *string `locationName:"Bucket" type:"string"` // Indicates whether the multipart upload uses an S3 Bucket Key for server-side @@ -13886,17 +14050,24 @@ func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUplo // The container element for specifying the default Object Lock retention settings // for new objects placed in the specified bucket. +// +// * The DefaultRetention settings require both a mode and a period. +// +// * The DefaultRetention period can be either Days or Years but you must +// select one. You cannot specify Days and Years at the same time. type DefaultRetention struct { _ struct{} `type:"structure"` // The number of days that you want to specify for the default retention period. + // Must be used with Mode. Days *int64 `type:"integer"` // The default Object Lock retention mode you want to apply to new objects placed - // in the specified bucket. + // in the specified bucket. Must be used with either Days or Years. Mode *string `type:"string" enum:"ObjectLockRetentionMode"` // The number of years that you want to specify for the default retention period. + // Must be used with Mode. Years *int64 `type:"integer"` } @@ -13995,7 +14166,7 @@ type DeleteBucketAnalyticsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -14109,7 +14280,7 @@ type DeleteBucketCorsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -14210,7 +14381,7 @@ type DeleteBucketEncryptionInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. 
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -14310,7 +14481,7 @@ type DeleteBucketInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -14500,7 +14671,7 @@ type DeleteBucketInventoryConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -14614,7 +14785,7 @@ type DeleteBucketLifecycleInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -14714,7 +14885,7 @@ type DeleteBucketMetricsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -14842,7 +15013,7 @@ type DeleteBucketOwnershipControlsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -14942,7 +15113,7 @@ type DeleteBucketPolicyInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -15042,7 +15213,7 @@ type DeleteBucketReplicationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. 
If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -15142,7 +15313,7 @@ type DeleteBucketTaggingInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -15242,7 +15413,7 @@ type DeleteBucketWebsiteInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -15438,20 +15609,20 @@ type DeleteObjectInput struct { // The bucket name of the bucket containing the object. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. 
// // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -15460,7 +15631,7 @@ type DeleteObjectInput struct { // to process this operation. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -15644,30 +15815,31 @@ type DeleteObjectTaggingInput struct { // The bucket name containing the objects from which to remove the tags. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // Name of the object key. + // The key that identifies the object in the bucket from which to remove all + // tags. 
// // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -15794,20 +15966,20 @@ type DeleteObjectsInput struct { // The bucket name containing the objects to delete. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -15822,7 +15994,7 @@ type DeleteObjectsInput struct { // Delete is a required field Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -15952,7 +16124,7 @@ type DeleteObjectsOutput struct { // was successfully deleted. Deleted []*DeletedObject `type:"list" flattened:"true"` - // Container for a failed delete operation that describes the object that Amazon + // Container for a failed delete action that describes the object that Amazon // S3 attempted to delete and the error it encountered. 
Errors []*Error `locationName:"Error" type:"list" flattened:"true"` @@ -15997,7 +16169,7 @@ type DeletePublicAccessBlockInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -16423,9 +16595,9 @@ type Error struct { // Forbidden SOAP Fault Code Prefix: Client // // * Code: AccountProblem Description: There is a problem with your AWS account - // that prevents the operation from completing successfully. Contact AWS - // Support for further assistance. HTTP Status Code: 403 Forbidden SOAP Fault - // Code Prefix: Client + // that prevents the action from completing successfully. Contact AWS Support + // for further assistance. HTTP Status Code: 403 Forbidden SOAP Fault Code + // Prefix: Client // // * Code: AllAccessDisabled Description: All access to this Amazon S3 resource // has been disabled. Contact AWS Support for further assistance. HTTP Status @@ -16528,9 +16700,9 @@ type Error struct { // Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client // - // * Code: InvalidObjectState Description: The operation is not valid for - // the current state of the object. HTTP Status Code: 403 Forbidden SOAP - // Fault Code Prefix: Client + // * Code: InvalidObjectState Description: The action is not valid for the + // current state of the object. HTTP Status Code: 403 Forbidden SOAP Fault + // Code Prefix: Client // // * Code: InvalidPart Description: One or more of the specified parts could // not be found. The part might not have been uploaded, or the specified @@ -16695,7 +16867,7 @@ type Error struct { // can sign up at the following URL: https://aws.amazon.com/s3 HTTP Status // Code: 403 Forbidden SOAP Fault Code Prefix: Client // - // * Code: OperationAborted Description: A conflicting conditional operation + // * Code: OperationAborted Description: A conflicting conditional action // is currently in progress against this resource. Try again. HTTP Status // Code: 409 Conflict SOAP Fault Code Prefix: Client // @@ -16821,6 +16993,10 @@ type ErrorDocument struct { // The object key name to use when a 4XX class error occurs. // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // // Key is a required field Key *string `min:"1" type:"string" required:"true"` } @@ -16905,7 +17081,7 @@ type FilterRule struct { // the filtering rule applies. The maximum length is 1,024 characters. Overlapping // prefixes and suffixes are not supported. For more information, see Configuring // Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. Name *string `type:"string" enum:"FilterRuleName"` // The value that the filter searches for in object key names. 
@@ -16942,7 +17118,7 @@ type GetBucketAccelerateConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -17051,7 +17227,7 @@ type GetBucketAclInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -17169,7 +17345,7 @@ type GetBucketAnalyticsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -17292,7 +17468,7 @@ type GetBucketCorsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -17403,7 +17579,7 @@ type GetBucketEncryptionInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -17625,7 +17801,7 @@ type GetBucketInventoryConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -17748,7 +17924,7 @@ type GetBucketLifecycleConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. 
If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -17857,7 +18033,7 @@ type GetBucketLifecycleInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -17966,7 +18142,7 @@ type GetBucketLocationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -18077,7 +18253,7 @@ type GetBucketLoggingInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -18189,7 +18365,7 @@ type GetBucketMetricsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -18312,7 +18488,7 @@ type GetBucketNotificationConfigurationRequest struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -18398,7 +18574,7 @@ type GetBucketOwnershipControlsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. 
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -18508,7 +18684,7 @@ type GetBucketPolicyInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -18617,7 +18793,7 @@ type GetBucketPolicyStatusInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -18726,7 +18902,7 @@ type GetBucketReplicationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -18836,7 +19012,7 @@ type GetBucketRequestPaymentInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -18945,7 +19121,7 @@ type GetBucketTaggingInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -19056,7 +19232,7 @@ type GetBucketVersioningInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -19176,7 +19352,7 @@ type GetBucketWebsiteInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. 
If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -19310,17 +19486,17 @@ type GetObjectAclInput struct { // The bucket name that contains the object for which to get the ACL information. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -19484,25 +19660,25 @@ type GetObjectInput struct { // The bucket name containing the object. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -19565,14 +19741,14 @@ type GetObjectInput struct { // Sets the Expires header of the response. ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"rfc822"` - // Specifies the algorithm to use to when encrypting the object (for example, + // Specifies the algorithm to use to when decrypting the object (for example, // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // S3 does not store the encryption key. The key must be appropriate for use - // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // Specifies the customer-provided encryption key for Amazon S3 used to encrypt + // the data. This value is used to decrypt the object when recovering it and + // must match the one used when storing the data. The key must be appropriate + // for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` @@ -19784,17 +19960,17 @@ type GetObjectLegalHoldInput struct { // The bucket name containing the object whose Legal Hold status you want to // retrieve. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. 
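The corrected SSECustomerAlgorithm/SSECustomerKey wording above means the customer-provided key supplied at upload must be supplied again when retrieving the object. A sketch reusing the svc client and imports from the earlier example (the 32-byte key is hypothetical):

// sseCGetSketch retrieves an object stored with SSE-C; the key must be the
// same one used when the object was uploaded.
func sseCGetSketch(svc *s3.S3) (*s3.GetObjectOutput, error) {
	return svc.GetObject(&s3.GetObjectInput{
		Bucket:               aws.String("example-bucket"),
		Key:                  aws.String("example-object"),
		SSECustomerAlgorithm: aws.String("AES256"),
		// The SDK derives the customer-key and key-MD5 headers from this raw key value.
		SSECustomerKey: aws.String("01234567890123456789012345678901"),
	})
}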
For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -19939,17 +20115,17 @@ type GetObjectLockConfigurationInput struct { // The bucket whose Object Lock configuration you want to retrieve. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -20103,7 +20279,7 @@ type GetObjectOutput struct { // The date and time at which the object is no longer cacheable. Expires *string `location:"header" locationName:"Expires" type:"string"` - // Last modified date of the object + // Creation date of the object. LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` // A map of metadata to store with the object in S3. @@ -20140,7 +20316,7 @@ type GetObjectOutput struct { // request. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - // Provides information about object restoration operation and expiration time + // Provides information about object restoration action and expiration time // of the restored object copy. Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` @@ -20387,17 +20563,17 @@ type GetObjectRetentionInput struct { // The bucket name containing the object whose retention settings you want to // retrieve. // - // When using this API with an access point, you must direct requests to the - // access point hostname. 
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -20542,25 +20718,25 @@ type GetObjectTaggingInput struct { // The bucket name containing the object for which to get the tagging information. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. 
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -20570,6 +20746,13 @@ type GetObjectTaggingInput struct { // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // The versionId of the object for which to get the tagging information. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -20631,6 +20814,12 @@ func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput { return s } +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectTaggingInput) SetRequestPayer(v string) *GetObjectTaggingInput { + s.RequestPayer = &v + return s +} + // SetVersionId sets the VersionId field's value. func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput { s.VersionId = &v @@ -20707,7 +20896,7 @@ type GetObjectTorrentInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -20857,7 +21046,7 @@ type GetPublicAccessBlockInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -21149,25 +21338,25 @@ type HeadBucketInput struct { // The bucket name. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. 
For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -21264,25 +21453,25 @@ type HeadObjectInput struct { // The name of the bucket containing the object. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. 
The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -21555,7 +21744,7 @@ type HeadObjectOutput struct { // The date and time at which the object is no longer cacheable. Expires *string `location:"header" locationName:"Expires" type:"string"` - // Last modified date of the object + // Creation date of the object. LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` // A map of metadata to store with the object in S3. @@ -21636,7 +21825,7 @@ type HeadObjectOutput struct { // If an archive copy is already restored, the header value indicates when Amazon // S3 is scheduled to delete the object copy. For example: // - // x-amz-restore: ongoing-request="false", expiry-date="Fri, 23 Dec 2012 00:00:00 + // x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 // GMT" // // If the object restoration is in progress, the header returns the value ongoing-request="true". @@ -21881,6 +22070,10 @@ type IndexDocument struct { // with the key name images/index.html) The suffix must not be empty and must // not include a slash character. // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // // Suffix is a required field Suffix *string `type:"string" required:"true"` } @@ -22164,6 +22357,10 @@ type IntelligentTieringFilter struct { // An object key name prefix that identifies the subset of objects to which // the rule applies. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). Prefix *string `type:"string"` // A container of a key value name pair. @@ -22705,14 +22902,14 @@ type LambdaFunctionConfiguration struct { // The Amazon S3 bucket event for which to invoke the AWS Lambda function. For // more information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. 
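The Restore field documented above surfaces the x-amz-restore header on HEAD responses. A sketch, again assuming the svc client and imports from the first example, that checks the restore status of a hypothetical archived object:

// headRestoreSketch reads the restore status and last-modified time of an object.
func headRestoreSketch(svc *s3.S3) {
	head, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-archived-object"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// Restore carries the x-amz-restore header, e.g.
	// ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 GMT".
	fmt.Println("restore:", aws.StringValue(head.Restore))
	fmt.Println("last modified:", aws.TimeValue(head.LastModified))
}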
// // Events is a required field Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` // Specifies object key name filtering rules. For information about key name // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. Filter *NotificationConfigurationFilter `type:"structure"` // An optional unique identifier for configurations in a notification configuration. @@ -22880,7 +23077,7 @@ type LifecycleRule struct { // that Amazon S3 will wait before permanently removing all parts of the upload. // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` // Specifies the expiration for the lifecycle of the object in the form of date, @@ -22888,7 +23085,8 @@ type LifecycleRule struct { Expiration *LifecycleExpiration `type:"structure"` // The Filter is used to identify objects that a Lifecycle Rule applies to. - // A Filter must have exactly one of Prefix, Tag, or And specified. + // A Filter must have exactly one of Prefix, Tag, or And specified. Filter is + // required if the LifecycleRule does not containt a Prefix element. Filter *LifecycleRuleFilter `type:"structure"` // Unique identifier for the rule. The value cannot be longer than 255 characters. @@ -22909,7 +23107,11 @@ type LifecycleRule struct { NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"` // Prefix identifying one or more objects to which the rule applies. This is - // No longer used; use Filter instead. + // no longer used; use Filter instead. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). // // Deprecated: Prefix has been deprecated Prefix *string `deprecated:"true" type:"string"` @@ -23073,6 +23275,10 @@ type LifecycleRuleFilter struct { And *LifecycleRuleAndOperator `type:"structure"` // Prefix identifying one or more objects to which the rule applies. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). Prefix *string `type:"string"` // This tag must exist in the object's tag set in order for the rule to apply. @@ -23139,7 +23345,7 @@ type ListBucketAnalyticsConfigurationsInput struct { // should begin. ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. 
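The Filter-versus-deprecated-Prefix note for LifecycleRule above can be exercised through PutBucketLifecycleConfiguration. A sketch with a hypothetical bucket and a 30-day expiry rule, assuming the client from the first example:

// lifecycleSketch stores one Filter-based lifecycle rule.
func lifecycleSketch(svc *s3.S3) error {
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"),
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:     aws.String("expire-logs"),
				Status: aws.String("Enabled"),
				// Filter replaces the deprecated top-level Prefix element.
				Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
				Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
			}},
		},
	})
	return err
}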
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -23433,7 +23639,7 @@ type ListBucketInventoryConfigurationsInput struct { // that Amazon S3 understands. ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -23586,7 +23792,7 @@ type ListBucketMetricsConfigurationsInput struct { // value that Amazon S3 understands. ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -23777,20 +23983,20 @@ type ListMultipartUploadsInput struct { // The name of the bucket to which the multipart upload was initiated. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. 
// // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -23813,7 +24019,7 @@ type ListMultipartUploadsInput struct { // keys in the response. EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -24125,7 +24331,7 @@ type ListObjectVersionsInput struct { // keys in the response. EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -24134,7 +24340,7 @@ type ListObjectVersionsInput struct { KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` // Sets the maximum number of keys returned in the response. By default the - // API returns up to 1,000 key names. The response might contain fewer keys + // action returns up to 1,000 key names. The response might contain fewer keys // but will never contain more. If additional keys satisfy the search criteria, // but were not returned because max-keys was exceeded, the response contains // true. To return the additional keys, see key-marker @@ -24417,20 +24623,20 @@ type ListObjectsInput struct { // The name of the bucket containing the objects. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. 
For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -24446,7 +24652,7 @@ type ListObjectsInput struct { // keys in the response. EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -24455,7 +24661,7 @@ type ListObjectsInput struct { Marker *string `location:"querystring" locationName:"marker" type:"string"` // Sets the maximum number of keys returned in the response. By default the - // API returns up to 1,000 key names. The response might contain fewer keys + // action returns up to 1,000 key names. The response might contain fewer keys // but will never contain more. MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` @@ -24579,8 +24785,8 @@ func (s ListObjectsInput) updateArnableField(v string) (interface{}, error) { type ListObjectsOutput struct { _ struct{} `type:"structure"` - // All of the keys rolled up in a common prefix count as a single return when - // calculating the number of returns. + // All of the keys (up to 1,000) rolled up in a common prefix count as a single + // return when calculating the number of returns. // // A response can contain CommonPrefixes only if you specify a delimiter. // @@ -24711,20 +24917,20 @@ type ListObjectsV2Input struct { // Bucket name to list. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -24740,7 +24946,7 @@ type ListObjectsV2Input struct { // Encoding type used by Amazon S3 to encode object keys in the response. EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -24751,7 +24957,7 @@ type ListObjectsV2Input struct { FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"` // Sets the maximum number of keys returned in the response. By default the - // API returns up to 1,000 key names. The response might contain fewer keys + // action returns up to 1,000 key names. The response might contain fewer keys // but will never contain more. MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` @@ -24891,8 +25097,8 @@ func (s ListObjectsV2Input) updateArnableField(v string) (interface{}, error) { type ListObjectsV2Output struct { _ struct{} `type:"structure"` - // All of the keys rolled up into a common prefix count as a single return when - // calculating the number of returns. + // All of the keys (up to 1,000) rolled up into a common prefix count as a single + // return when calculating the number of returns. // // A response can contain CommonPrefixes only if you specify a delimiter. // @@ -24936,31 +25142,31 @@ type ListObjectsV2Output struct { IsTruncated *bool `type:"boolean"` // KeyCount is the number of keys returned with this request. KeyCount will - // always be less than equals to MaxKeys field. Say you ask for 50 keys, your - // result will include less than equals 50 keys + // always be less than or equals to MaxKeys field. Say you ask for 50 keys, + // your result will include less than equals 50 keys KeyCount *int64 `type:"integer"` // Sets the maximum number of keys returned in the response. By default the - // API returns up to 1,000 key names. The response might contain fewer keys + // action returns up to 1,000 key names. The response might contain fewer keys // but will never contain more. MaxKeys *int64 `type:"integer"` // The bucket name. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. 
For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. Name *string `type:"string"` // NextContinuationToken is sent when isTruncated is true, which means there @@ -25063,25 +25269,25 @@ type ListPartsInput struct { // The name of the bucket to which the parts are being uploaded. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. 
For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -25983,7 +26189,7 @@ type NoncurrentVersionTransition struct { // perform the associated action. For information about the noncurrent days // calculations, see How Amazon S3 Calculates How Long an Object Has Been Noncurrent // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. NoncurrentDays *int64 `type:"integer"` // The class of storage used to store the object. @@ -26145,7 +26351,7 @@ func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConf // Specifies object key name filtering rules. For information about key name // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. type NotificationConfigurationFilter struct { _ struct{} `type:"structure"` @@ -26195,7 +26401,7 @@ type Object struct { // the object. Key *string `min:"1" type:"string"` - // The date the Object was Last Modified + // Creation date of the object. LastModified *time.Time `type:"timestamp"` // The owner of the object @@ -26258,7 +26464,11 @@ func (s *Object) SetStorageClass(v string) *Object { type ObjectIdentifier struct { _ struct{} `type:"structure"` - // Key name of the object to delete. + // Key name of the object. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). // // Key is a required field Key *string `min:"1" type:"string" required:"true"` @@ -26309,10 +26519,14 @@ func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { type ObjectLockConfiguration struct { _ struct{} `type:"structure"` - // Indicates whether this bucket has an Object Lock configuration enabled. + // Indicates whether this bucket has an Object Lock configuration enabled. Enable + // ObjectLockEnabled when you apply ObjectLockConfiguration to a bucket. ObjectLockEnabled *string `type:"string" enum:"ObjectLockEnabled"` - // The Object Lock rule in place for the specified object. + // Specifies the Object Lock rule for the specified object. Enable the this + // rule when you apply ObjectLockConfiguration to a bucket. Bucket settings + // require both a mode and a period. The period can be either Days or Years + // but you must select one. You cannot specify Days and Years at the same time. 
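The mode-plus-period requirement spelled out for ObjectLockConfiguration and its Rule translates to a request like the following sketch (bucket, mode, and period are illustrative; assumes the client from the first example):

// objectLockSketch enables a default retention rule on a bucket.
func objectLockSketch(svc *s3.S3) error {
	_, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
		Bucket: aws.String("example-bucket"),
		ObjectLockConfiguration: &s3.ObjectLockConfiguration{
			ObjectLockEnabled: aws.String("Enabled"),
			// DefaultRetention needs a mode plus either Days or Years, never both.
			Rule: &s3.ObjectLockRule{
				DefaultRetention: &s3.DefaultRetention{
					Mode: aws.String("GOVERNANCE"),
					Days: aws.Int64(30),
				},
			},
		},
	})
	return err
}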
Rule *ObjectLockRule `type:"structure"` } @@ -26399,8 +26613,10 @@ func (s *ObjectLockRetention) SetRetainUntilDate(v time.Time) *ObjectLockRetenti type ObjectLockRule struct { _ struct{} `type:"structure"` - // The default retention period that you want to apply to new objects placed - // in the specified bucket. + // The default Object Lock retention mode and period that you want to apply + // to new objects placed in the specified bucket. Bucket settings require both + // a mode and a period. The period can be either Days or Years but you must + // select one. You cannot specify Days and Years at the same time. DefaultRetention *DefaultRetention `type:"structure"` } @@ -26899,7 +27115,7 @@ func (s *ProgressEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstr // S3 bucket. You can enable the configuration options in any combination. For // more information about when Amazon S3 considers a bucket or object public, // see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. type PublicAccessBlockConfiguration struct { _ struct{} `type:"structure"` @@ -26990,7 +27206,7 @@ type PutBucketAccelerateConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -27105,7 +27321,7 @@ type PutBucketAclInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -27273,7 +27489,7 @@ type PutBucketAnalyticsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -27404,12 +27620,12 @@ type PutBucketCorsInput struct { // Describes the cross-origin access configuration for objects in an Amazon // S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon - // Simple Storage Service Developer Guide. + // S3 User Guide. // // CORSConfiguration is a required field CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. 
If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -27522,12 +27738,12 @@ type PutBucketEncryptionInput struct { // Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS // (SSE-KMS). For information about the Amazon S3 default encryption feature, // see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -27769,7 +27985,7 @@ type PutBucketInventoryConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -27902,7 +28118,7 @@ type PutBucketLifecycleConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -28014,7 +28230,7 @@ type PutBucketLifecycleInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -28133,7 +28349,7 @@ type PutBucketLoggingInput struct { // BucketLoggingStatus is a required field BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. 
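The PutBucketEncryptionInput documented above carries the bucket's default-encryption rules. A sketch that sets SSE-KMS as the default (the KMS key ARN is a placeholder; assumes the client from the first example):

// encryptionSketch configures SSE-KMS default encryption for a bucket.
func encryptionSketch(svc *s3.S3) error {
	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String("example-bucket"),
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm:   aws.String("aws:kms"),
					KMSMasterKeyID: aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"),
				},
			}},
		},
	})
	return err
}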
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -28247,7 +28463,7 @@ type PutBucketMetricsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -28380,7 +28596,7 @@ type PutBucketNotificationConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -28500,7 +28716,7 @@ type PutBucketNotificationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -28614,7 +28830,7 @@ type PutBucketOwnershipControlsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -28738,7 +28954,7 @@ type PutBucketPolicyInput struct { // to change this bucket policy in the future. ConfirmRemoveSelfBucketAccess *bool `location:"header" locationName:"x-amz-confirm-remove-self-bucket-access" type:"boolean"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -28858,7 +29074,7 @@ type PutBucketReplicationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. 
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -28987,7 +29203,7 @@ type PutBucketRequestPaymentInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -29106,7 +29322,7 @@ type PutBucketTaggingInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -29225,7 +29441,7 @@ type PutBucketVersioningInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -29349,7 +29565,7 @@ type PutBucketWebsiteInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -29473,17 +29689,17 @@ type PutObjectAclInput struct { // The bucket name that contains the object to which you want to attach the // ACL. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. 
// // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -29512,22 +29728,22 @@ type PutObjectAclInput struct { // This action is not supported by Amazon S3 on Outposts. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - // Key for which the PUT operation was initiated. + // Key for which the PUT action was initiated. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -29722,22 +29938,22 @@ type PutObjectInput struct { // Object data. Body io.ReadSeeker `type:"blob"` - // The bucket name to which the PUT operation was initiated. + // The bucket name to which the PUT action was initiated. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. 
For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -29747,8 +29963,8 @@ type PutObjectInput struct { // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with // SSE-KMS. // - // Specifying this header with a PUT operation doesn’t affect bucket-level - // settings for S3 Bucket Key. + // Specifying this header with a PUT action doesn’t affect bucket-level settings + // for S3 Bucket Key. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // Can be used to specify caching behavior along the request/reply chain. For @@ -29786,7 +30002,7 @@ type PutObjectInput struct { // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17). ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -29815,7 +30031,7 @@ type PutObjectInput struct { // This action is not supported by Amazon S3 on Outposts. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - // Object key for which the PUT operation was initiated. + // Object key for which the PUT action was initiated. // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -30181,17 +30397,17 @@ type PutObjectLegalHoldInput struct { // The bucket name containing the object that you want to place a Legal Hold // on. 
// - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -30350,7 +30566,7 @@ type PutObjectLockConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -30611,20 +30827,20 @@ type PutObjectRetentionInput struct { // The bucket name that contains the object you want to apply this Object Retention // configuration to. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. 
// // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates whether this operation should bypass Governance-mode restrictions. + // Indicates whether this action should bypass Governance-mode restrictions. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -30787,25 +31003,25 @@ type PutObjectTaggingInput struct { // The bucket name containing the object. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. 
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -30815,6 +31031,13 @@ type PutObjectTaggingInput struct { // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // Container for the TagSet and Tag elements // // Tagging is a required field @@ -30889,6 +31112,12 @@ func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput { return s } +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectTaggingInput) SetRequestPayer(v string) *PutObjectTaggingInput { + s.RequestPayer = &v + return s +} + // SetTagging sets the Tagging field's value. func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput { s.Tagging = v @@ -30960,7 +31189,7 @@ type PutPublicAccessBlockInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -30969,7 +31198,7 @@ type PutPublicAccessBlockInput struct { // S3 bucket. You can enable the configuration options in any combination. For // more information about when Amazon S3 considers a bucket or object public, // see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. // // PublicAccessBlockConfiguration is a required field PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `locationName:"PublicAccessBlockConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -31082,7 +31311,7 @@ type QueueConfiguration struct { // Specifies object key name filtering rules. For information about key name // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. Filter *NotificationConfigurationFilter `type:"structure"` // An optional unique identifier for configurations in a notification configuration. @@ -31158,7 +31387,7 @@ type QueueConfigurationDeprecated struct { // Deprecated: Event has been deprecated Event *string `deprecated:"true" type:"string" enum:"Event"` - // A collection of bucket events for which to send notifications + // A collection of bucket events for which to send notifications. Events []*string `locationName:"Event" type:"list" flattened:"true"` // An optional unique identifier for configurations in a notification configuration. 
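The hunk above adds a RequestPayer field (and its SetRequestPayer setter) to PutObjectTaggingInput alongside the x-amz-expected-bucket-owner header already documented on these inputs. A minimal caller-side sketch of how that combination could be used with this vendored SDK follows; it is not part of the vendored change, and the bucket, key, region, and account ID are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// The requester, not the bucket owner, is charged for the request; the call
	// fails with HTTP 403 (Access Denied) if the bucket is owned by an account
	// other than the one named in ExpectedBucketOwner.
	out, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
		Bucket:              aws.String("example-bucket"),         // placeholder
		Key:                 aws.String("example-key"),            // placeholder
		ExpectedBucketOwner: aws.String("111122223333"),           // placeholder account ID
		RequestPayer:        aws.String(s3.RequestPayerRequester), // field added by this update
		Tagging: &s3.Tagging{
			TagSet: []*s3.Tag{{Key: aws.String("project"), Value: aws.String("demo")}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.VersionId))
}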
@@ -31275,11 +31504,19 @@ type Redirect struct { // and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required // if one of the siblings is present. Can be present only if ReplaceKeyWith // is not provided. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). ReplaceKeyPrefixWith *string `type:"string"` // The specific object key to use in the redirect request. For example, redirect // request to error.html. Not required if one of the siblings is present. Can // be present only if ReplaceKeyPrefixWith is not provided. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). ReplaceKeyWith *string `type:"string"` } @@ -31428,7 +31665,7 @@ type ReplicationConfiguration struct { // The Amazon Resource Name (ARN) of the AWS Identity and Access Management // (IAM) role that Amazon S3 assumes when replicating objects. For more information, // see How to Set Up Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. // // Role is a required field Role *string `type:"string" required:"true"` @@ -31529,6 +31766,10 @@ type ReplicationRule struct { // the rule applies. The maximum prefix length is 1,024 characters. To include // all objects in a bucket, specify an empty string. // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // // Deprecated: Prefix has been deprecated Prefix *string `deprecated:"true" type:"string"` @@ -31539,7 +31780,7 @@ type ReplicationRule struct { // with the highest priority. The higher the number, the higher the priority. // // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. Priority *int64 `type:"integer"` // A container that describes additional filters for identifying the source @@ -31665,7 +31906,7 @@ func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { // an And tag. // // * If you specify a filter based on multiple tags, wrap the Tag elements -// in an And tag +// in an And tag. type ReplicationRuleAndOperator struct { _ struct{} `type:"structure"` @@ -31737,6 +31978,10 @@ type ReplicationRuleFilter struct { // An object key name prefix that identifies the subset of objects to which // the rule applies. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). Prefix *string `type:"string"` // A container for specifying a tag key and value. 
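The Redirect documentation updated above describes ReplaceKeyPrefixWith and ReplaceKeyWith as mutually exclusive siblings on a website routing rule. As a hedged illustration only (assumed bucket name and prefixes, not part of the vendored diff), a prefix-rewriting rule could be configured like this:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	_, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
		Bucket: aws.String("example-website-bucket"), // placeholder
		WebsiteConfiguration: &s3.WebsiteConfiguration{
			IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
			RoutingRules: []*s3.RoutingRule{{
				// When a requested key starts with docs/ ...
				Condition: &s3.Condition{KeyPrefixEquals: aws.String("docs/")},
				// ... redirect with the prefix rewritten to documents/
				// (ReplaceKeyWith would instead redirect every match to one key).
				Redirect: &s3.Redirect{ReplaceKeyPrefixWith: aws.String("documents/")},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}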
@@ -31946,30 +32191,30 @@ type RestoreObjectInput struct { // The bucket name containing the object to restore. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // Object key for which the operation was initiated. + // Object key for which the action was initiated. // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -32240,7 +32485,7 @@ func (s *RestoreRequest) SetType(v string) *RestoreRequest { // Specifies the redirect behavior and when a redirect is applied. For more // information about routing rules, see Configuring advanced conditional redirects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. type RoutingRule struct { _ struct{} `type:"structure"` @@ -32296,7 +32541,7 @@ func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { // Specifies lifecycle rules for an Amazon S3 bucket. 
For more information, // see Put Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html) // in the Amazon Simple Storage Service API Reference. For examples, see Put -// Bucket Lifecycle Configuration Examples (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html#API_PutBucketLifecycleConfiguration_Examples) +// Bucket Lifecycle Configuration Examples (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html#API_PutBucketLifecycleConfiguration_Examples). type Rule struct { _ struct{} `type:"structure"` @@ -32304,7 +32549,7 @@ type Rule struct { // that Amazon S3 will wait before permanently removing all parts of the upload. // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` // Specifies the expiration for the lifecycle of the object. @@ -32332,6 +32577,10 @@ type Rule struct { // Object key prefix that identifies one or more objects to which this rule // applies. // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // // Prefix is a required field Prefix *string `type:"string" required:"true"` @@ -32344,7 +32593,7 @@ type Rule struct { // Specifies when an object transitions to a specified storage class. For more // information about Amazon S3 lifecycle configuration rules, see Transitioning // Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. Transition *Transition `type:"structure"` } @@ -32703,7 +32952,7 @@ type SelectObjectContentInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -33168,7 +33417,7 @@ type ServerSideEncryptionRule struct { // S3 to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled. // // For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. BucketKeyEnabled *bool `type:"boolean"` } @@ -33735,14 +33984,14 @@ type TopicConfiguration struct { // The Amazon S3 bucket event about which to send notifications. For more information, // see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. 
// // Events is a required field Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` // Specifies object key name filtering rules. For information about key name // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. Filter *NotificationConfigurationFilter `type:"structure"` // An optional unique identifier for configurations in a notification configuration. @@ -33868,7 +34117,7 @@ func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDep // Specifies when an object transitions to a specified storage class. For more // information about Amazon S3 lifecycle configuration rules, see Transitioning // Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. type Transition struct { _ struct{} `type:"structure"` @@ -33917,27 +34166,27 @@ type UploadPartCopyInput struct { // The bucket name. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Specifies the source object for the copy operation. 
You specify the value // in one of two formats, depending on whether you want to access the source - // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-points.html): + // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html): // // * For objects not accessed through an access point, specify the name of // the source bucket and key of the source object, separated by a slash (/). @@ -34001,12 +34250,12 @@ type UploadPartCopyInput struct { // encryption key was transmitted without error. CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` - // The account id of the expected destination bucket owner. If the destination + // The account ID of the expected destination bucket owner. If the destination // bucket is owned by a different account, the request will fail with an HTTP // 403 (Access Denied) error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // The account id of the expected source bucket owner. If the source bucket + // The account ID of the expected source bucket owner. If the source bucket // is owned by a different account, the request will fail with an HTTP 403 (Access // Denied) error. ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"` @@ -34359,20 +34608,20 @@ type UploadPartInput struct { // The name of the bucket to which the multipart upload was initiated. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. 
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -34386,7 +34635,7 @@ type UploadPartInput struct { // if object lock parameters are specified. ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -34793,6 +35042,452 @@ func (s *WebsiteConfiguration) SetRoutingRules(v []*RoutingRule) *WebsiteConfigu return s } +type WriteGetObjectResponseInput struct { + _ struct{} `locationName:"WriteGetObjectResponseRequest" type:"structure" payload:"Body"` + + // Indicates that a range of bytes was specified. + AcceptRanges *string `location:"header" locationName:"x-amz-fwd-header-accept-ranges" type:"string"` + + // The object data. + // + // To use an non-seekable io.Reader for this request wrap the io.Reader with + // "aws.ReadSeekCloser". The SDK will not retry request errors for non-seekable + // readers. This will allow the SDK to send the reader's payload as chunked + // transfer encoding. + Body io.ReadSeeker `type:"blob"` + + // Indicates whether the object stored in Amazon S3 uses an S3 bucket key for + // server-side encryption with AWS KMS (SSE-KMS). + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"x-amz-fwd-header-Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"x-amz-fwd-header-Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"x-amz-fwd-header-Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"x-amz-fwd-header-Content-Language" type:"string"` + + // The size of the content body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The portion of the object returned in the response. + ContentRange *string `location:"header" locationName:"x-amz-fwd-header-Content-Range" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"x-amz-fwd-header-Content-Type" type:"string"` + + // Specifies whether an object stored in Amazon S3 is (true) or is not (false) + // a delete marker. + DeleteMarker *bool `location:"header" locationName:"x-amz-fwd-header-x-amz-delete-marker" type:"boolean"` + + // An opaque identifier assigned by a web server to a specific version of a + // resource found at a URL. + ETag *string `location:"header" locationName:"x-amz-fwd-header-ETag" type:"string"` + + // A string that uniquely identifies an error condition. 
Returned in the + // tag of the error XML response for a corresponding GetObject call. Cannot + // be used with a successful StatusCode header or when the transformed object + // is provided in the body. All error codes from S3 are sentence-cased. Regex + // value is "^[A-Z][a-zA-Z]+$". + ErrorCode *string `location:"header" locationName:"x-amz-fwd-error-code" type:"string"` + + // Contains a generic description of the error condition. Returned in the + // tag of the error XML response for a corresponding GetObject call. Cannot + // be used with a successful StatusCode header or when the transformed object + // is provided in body. + ErrorMessage *string `location:"header" locationName:"x-amz-fwd-error-message" type:"string"` + + // If object stored in Amazon S3 expiration is configured (see PUT Bucket lifecycle) + // it includes expiry-date and rule-id key-value pairs providing object expiration + // information. The value of the rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-fwd-header-x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"x-amz-fwd-header-Expires" type:"timestamp"` + + // The date and time that the object was last modified. + LastModified *time.Time `location:"header" locationName:"x-amz-fwd-header-Last-Modified" type:"timestamp"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Set to the number of metadata entries not returned in x-amz-meta headers. + // This can happen if you create metadata using an API like SOAP that supports + // more flexible metadata than the REST API. For example, using SOAP, you can + // create metadata whose values are not legal HTTP headers. + MissingMeta *int64 `location:"header" locationName:"x-amz-fwd-header-x-amz-missing-meta" type:"integer"` + + // Indicates whether an object stored in Amazon S3 has an active legal hold. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-fwd-header-x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // Indicates whether an object stored in Amazon S3 has Object Lock enabled. + // For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html). + ObjectLockMode *string `location:"header" locationName:"x-amz-fwd-header-x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when Object Lock is configured to expire. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-fwd-header-x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + + // The count of parts this object has. + PartsCount *int64 `location:"header" locationName:"x-amz-fwd-header-x-amz-mp-parts-count" type:"integer"` + + // Indicates if request involves bucket that is either a source or destination + // in a Replication rule. For more information about S3 Replication, see Replication + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html). + ReplicationStatus *string `location:"header" locationName:"x-amz-fwd-header-x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-fwd-header-x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Route prefix to the HTTP URL generated. + // + // RequestRoute is a required field + RequestRoute *string `location:"header" locationName:"x-amz-request-route" type:"string" required:"true"` + + // A single use encrypted token that maps WriteGetObjectResponse to the end + // user GetObject request. + // + // RequestToken is a required field + RequestToken *string `location:"header" locationName:"x-amz-request-token" type:"string" required:"true"` + + // Provides information about object restoration operation and expiration time + // of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-fwd-header-x-amz-restore" type:"string"` + + // Encryption algorithm used if server-side encryption with a customer-provided + // encryption key was specified for object stored in Amazon S3. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // 128-bit MD5 digest of customer-provided encryption key used in Amazon S3 + // to encrypt data stored in S3. For more information, see Protecting data using + // server-side encryption with customer-provided encryption keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html). + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for stored + // in Amazon S3 object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when storing requested object in + // Amazon S3 (for example, AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // The integer status code for an HTTP response of a corresponding GetObject + // request. + // + // Status Codes + // + // * 200 - OK + // + // * 206 - Partial Content + // + // * 304 - Not Modified + // + // * 400 - Bad Request + // + // * 401 - Unauthorized + // + // * 403 - Forbidden + // + // * 404 - Not Found + // + // * 405 - Method Not Allowed + // + // * 409 - Conflict + // + // * 411 - Length Required + // + // * 412 - Precondition Failed + // + // * 416 - Range Not Satisfiable + // + // * 500 - Internal Server Error + // + // * 503 - Service Unavailable + StatusCode *int64 `location:"header" locationName:"x-amz-fwd-status" type:"integer"` + + // The class of storage used to store object in Amazon S3. + StorageClass *string `location:"header" locationName:"x-amz-fwd-header-x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The number of tags, if any, on the object. + TagCount *int64 `location:"header" locationName:"x-amz-fwd-header-x-amz-tagging-count" type:"integer"` + + // An ID used to reference a specific version of the object. 
+ VersionId *string `location:"header" locationName:"x-amz-fwd-header-x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s WriteGetObjectResponseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WriteGetObjectResponseInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *WriteGetObjectResponseInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WriteGetObjectResponseInput"} + if s.RequestRoute == nil { + invalidParams.Add(request.NewErrParamRequired("RequestRoute")) + } + if s.RequestRoute != nil && len(*s.RequestRoute) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RequestRoute", 1)) + } + if s.RequestToken == nil { + invalidParams.Add(request.NewErrParamRequired("RequestToken")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAcceptRanges sets the AcceptRanges field's value. +func (s *WriteGetObjectResponseInput) SetAcceptRanges(v string) *WriteGetObjectResponseInput { + s.AcceptRanges = &v + return s +} + +// SetBody sets the Body field's value. +func (s *WriteGetObjectResponseInput) SetBody(v io.ReadSeeker) *WriteGetObjectResponseInput { + s.Body = v + return s +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *WriteGetObjectResponseInput) SetBucketKeyEnabled(v bool) *WriteGetObjectResponseInput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *WriteGetObjectResponseInput) SetCacheControl(v string) *WriteGetObjectResponseInput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *WriteGetObjectResponseInput) SetContentDisposition(v string) *WriteGetObjectResponseInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *WriteGetObjectResponseInput) SetContentEncoding(v string) *WriteGetObjectResponseInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *WriteGetObjectResponseInput) SetContentLanguage(v string) *WriteGetObjectResponseInput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *WriteGetObjectResponseInput) SetContentLength(v int64) *WriteGetObjectResponseInput { + s.ContentLength = &v + return s +} + +// SetContentRange sets the ContentRange field's value. +func (s *WriteGetObjectResponseInput) SetContentRange(v string) *WriteGetObjectResponseInput { + s.ContentRange = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *WriteGetObjectResponseInput) SetContentType(v string) *WriteGetObjectResponseInput { + s.ContentType = &v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *WriteGetObjectResponseInput) SetDeleteMarker(v bool) *WriteGetObjectResponseInput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *WriteGetObjectResponseInput) SetETag(v string) *WriteGetObjectResponseInput { + s.ETag = &v + return s +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *WriteGetObjectResponseInput) SetErrorCode(v string) *WriteGetObjectResponseInput { + s.ErrorCode = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. 
+func (s *WriteGetObjectResponseInput) SetErrorMessage(v string) *WriteGetObjectResponseInput { + s.ErrorMessage = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *WriteGetObjectResponseInput) SetExpiration(v string) *WriteGetObjectResponseInput { + s.Expiration = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *WriteGetObjectResponseInput) SetExpires(v time.Time) *WriteGetObjectResponseInput { + s.Expires = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *WriteGetObjectResponseInput) SetLastModified(v time.Time) *WriteGetObjectResponseInput { + s.LastModified = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *WriteGetObjectResponseInput) SetMetadata(v map[string]*string) *WriteGetObjectResponseInput { + s.Metadata = v + return s +} + +// SetMissingMeta sets the MissingMeta field's value. +func (s *WriteGetObjectResponseInput) SetMissingMeta(v int64) *WriteGetObjectResponseInput { + s.MissingMeta = &v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *WriteGetObjectResponseInput) SetObjectLockLegalHoldStatus(v string) *WriteGetObjectResponseInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *WriteGetObjectResponseInput) SetObjectLockMode(v string) *WriteGetObjectResponseInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *WriteGetObjectResponseInput) SetObjectLockRetainUntilDate(v time.Time) *WriteGetObjectResponseInput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetPartsCount sets the PartsCount field's value. +func (s *WriteGetObjectResponseInput) SetPartsCount(v int64) *WriteGetObjectResponseInput { + s.PartsCount = &v + return s +} + +// SetReplicationStatus sets the ReplicationStatus field's value. +func (s *WriteGetObjectResponseInput) SetReplicationStatus(v string) *WriteGetObjectResponseInput { + s.ReplicationStatus = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *WriteGetObjectResponseInput) SetRequestCharged(v string) *WriteGetObjectResponseInput { + s.RequestCharged = &v + return s +} + +// SetRequestRoute sets the RequestRoute field's value. +func (s *WriteGetObjectResponseInput) SetRequestRoute(v string) *WriteGetObjectResponseInput { + s.RequestRoute = &v + return s +} + +// SetRequestToken sets the RequestToken field's value. +func (s *WriteGetObjectResponseInput) SetRequestToken(v string) *WriteGetObjectResponseInput { + s.RequestToken = &v + return s +} + +// SetRestore sets the Restore field's value. +func (s *WriteGetObjectResponseInput) SetRestore(v string) *WriteGetObjectResponseInput { + s.Restore = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *WriteGetObjectResponseInput) SetSSECustomerAlgorithm(v string) *WriteGetObjectResponseInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *WriteGetObjectResponseInput) SetSSECustomerKeyMD5(v string) *WriteGetObjectResponseInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. 
+func (s *WriteGetObjectResponseInput) SetSSEKMSKeyId(v string) *WriteGetObjectResponseInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *WriteGetObjectResponseInput) SetServerSideEncryption(v string) *WriteGetObjectResponseInput { + s.ServerSideEncryption = &v + return s +} + +// SetStatusCode sets the StatusCode field's value. +func (s *WriteGetObjectResponseInput) SetStatusCode(v int64) *WriteGetObjectResponseInput { + s.StatusCode = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *WriteGetObjectResponseInput) SetStorageClass(v string) *WriteGetObjectResponseInput { + s.StorageClass = &v + return s +} + +// SetTagCount sets the TagCount field's value. +func (s *WriteGetObjectResponseInput) SetTagCount(v int64) *WriteGetObjectResponseInput { + s.TagCount = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *WriteGetObjectResponseInput) SetVersionId(v string) *WriteGetObjectResponseInput { + s.VersionId = &v + return s +} + +func (s *WriteGetObjectResponseInput) hostLabels() map[string]string { + return map[string]string{ + "RequestRoute": aws.StringValue(s.RequestRoute), + } +} + +type WriteGetObjectResponseOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s WriteGetObjectResponseOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WriteGetObjectResponseOutput) GoString() string { + return s.String() +} + const ( // AnalyticsS3ExportFileFormatCsv is a AnalyticsS3ExportFileFormat enum value AnalyticsS3ExportFileFormatCsv = "CSV" diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go index f1959b03a95..ce87ab32017 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -48,6 +48,8 @@ func defaultInitRequestFn(r *request.Request) { // case opGetObject: // r.Handlers.Build.PushBack(askForTxEncodingAppendMD5) // r.Handlers.Unmarshal.PushBack(useMD5ValidationReader) + case opWriteGetObjectResponse: + r.Handlers.Build.PushFront(buildWriteGetObjectResponseEndpoint) } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go index 6346b927960..9fc2105fd2f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go @@ -1,6 +1,8 @@ package s3 import ( + "fmt" + "github.com/aws/aws-sdk-go/aws/awserr" "net/url" "strings" @@ -11,6 +13,13 @@ import ( "github.com/aws/aws-sdk-go/internal/s3shared/arn" ) +const ( + s3Namespace = "s3" + s3AccessPointNamespace = "s3-accesspoint" + s3ObjectsLambdaNamespace = "s3-object-lambda" + s3OutpostsNamespace = "s3-outposts" +) + // Used by shapes with members decorated as endpoint ARN. 
func parseEndpointARN(v string) (arn.Resource, error) { return arn.ParseResource(v, accessPointResourceParser) @@ -20,10 +29,14 @@ func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) { resParts := arn.SplitResource(a.Resource) switch resParts[0] { case "accesspoint": - if a.Service != "s3" { - return arn.AccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3"} + switch a.Service { + case s3Namespace: + return arn.ParseAccessPointResource(a, resParts[1:]) + case s3ObjectsLambdaNamespace: + return parseS3ObjectLambdaAccessPointResource(a, resParts) + default: + return arn.AccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s or %s", s3Namespace, s3ObjectsLambdaNamespace)} } - return arn.ParseAccessPointResource(a, resParts[1:]) case "outpost": if a.Service != "s3-outposts" { return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"} @@ -80,6 +93,25 @@ func parseOutpostAccessPointResource(a awsarn.ARN, resParts []string) (arn.Outpo return outpostAccessPointARN, nil } +func parseS3ObjectLambdaAccessPointResource(a awsarn.ARN, resParts []string) (arn.S3ObjectLambdaAccessPointARN, error) { + if a.Service != s3ObjectsLambdaNamespace { + return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s", s3ObjectsLambdaNamespace)} + } + + accessPointARN, err := arn.ParseAccessPointResource(a, resParts[1:]) + if err != nil { + return arn.S3ObjectLambdaAccessPointARN{}, err + } + + if len(accessPointARN.Region) == 0 { + return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("%s region not set", s3ObjectsLambdaNamespace)} + } + + return arn.S3ObjectLambdaAccessPointARN{ + AccessPointARN: accessPointARN, + }, nil +} + func endpointHandler(req *request.Request) { endpoint, ok := req.Params.(endpointARNGetter) if !ok || !endpoint.hasEndpointARN() { @@ -116,6 +148,11 @@ func endpointHandler(req *request.Request) { if err != nil { req.Error = err } + case arn.S3ObjectLambdaAccessPointARN: + err = updateRequestS3ObjectLambdaAccessPointEndpoint(req, tv) + if err != nil { + req.Error = err + } case arn.OutpostAccessPointARN: // outposts does not support FIPS regions if resReq.ResourceConfiguredForFIPS() { @@ -162,6 +199,31 @@ func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.Acce return nil } +func updateRequestS3ObjectLambdaAccessPointEndpoint(req *request.Request, accessPoint arn.S3ObjectLambdaAccessPointARN) error { + // DualStack not supported + if aws.BoolValue(req.Config.UseDualStack) { + return s3shared.NewClientConfiguredForDualStackError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Accelerate not supported + if aws.BoolValue(req.Config.S3UseAccelerate) { + return s3shared.NewClientConfiguredForAccelerateError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Ignore the disable host prefix for access points + req.Config.DisableEndpointHostPrefix = aws.Bool(false) + + if err := s3ObjectLambdaAccessPointEndpointBuilder(accessPoint).build(req); err != nil { + return err + } + + removeBucketFromPath(req.HTTPRequest.URL) + + return nil +} + func updateRequestOutpostAccessPointEndpoint(req *request.Request, accessPoint arn.OutpostAccessPointARN) error { // Accelerate not supported if aws.BoolValue(req.Config.S3UseAccelerate) { @@ -192,3 +254,37 @@ func removeBucketFromPath(u *url.URL) { u.Path = "/" } } 
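A minimal usage sketch, not part of the vendored diff: the new WriteGetObjectResponse shapes above, together with the buildWriteGetObjectResponseEndpoint handler added below, let an S3 Object Lambda function stream a transformed object back to the requester, while ordinary GetObject calls made against an s3-object-lambda access point ARN are routed by the ARN-parsing code in this hunk. The region, route, token, and object contents here are placeholders, and error handling is reduced to log.Fatal.

package main

import (
	"bytes"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// In a real S3 Object Lambda function the route and token arrive in the
	// Lambda event payload; placeholder values are used here.
	route, token := "io-use1-001", "example-request-token"

	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Stand-in "transformation" of the original object contents.
	transformed := strings.ToUpper("original object contents")

	// RequestRoute and RequestToken are required (see Validate above); the
	// request is signed for the s3-object-lambda endpoint.
	_, err := svc.WriteGetObjectResponse(&s3.WriteGetObjectResponseInput{
		RequestRoute: aws.String(route),
		RequestToken: aws.String(token),
		StatusCode:   aws.Int64(200),
		Body:         bytes.NewReader([]byte(transformed)),
	})
	if err != nil {
		log.Fatal(err)
	}
}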
+ +func buildWriteGetObjectResponseEndpoint(req *request.Request) { + // DualStack not supported + if aws.BoolValue(req.Config.UseDualStack) { + req.Error = awserr.New("ConfigurationError", "client configured for dualstack but not supported for operation", nil) + return + } + + // Accelerate not supported + if aws.BoolValue(req.Config.S3UseAccelerate) { + req.Error = awserr.New("ConfigurationError", "client configured for accelerate but not supported for operation", nil) + return + } + + signingName := s3ObjectsLambdaNamespace + signingRegion := req.ClientInfo.SigningRegion + + if !hasCustomEndpoint(req) { + endpoint, err := resolveRegionalEndpoint(req, aws.StringValue(req.Config.Region), EndpointsID) + if err != nil { + req.Error = awserr.New(request.ErrCodeSerialization, "failed to resolve endpoint", err) + return + } + signingRegion = endpoint.SigningRegion + + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + req.Error = err + return + } + updateS3HostPrefixForS3ObjectLambda(req) + } + + redirectSigner(req, signingName, signingRegion) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go index eb77d981ef6..71e9c9eff06 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go @@ -66,13 +66,9 @@ func (a accessPointEndpointBuilder) build(req *request.Request) error { if err = updateRequestEndpoint(req, endpoint.URL); err != nil { return err } - const serviceEndpointLabel = "s3-accesspoint" // dual stack provided by endpoint resolver - cfgHost := req.HTTPRequest.URL.Host - if strings.HasPrefix(cfgHost, "s3") { - req.HTTPRequest.URL.Host = serviceEndpointLabel + cfgHost[2:] - } + updateS3HostForS3AccessPoint(req) } protocol.HostPrefixBuilder{ @@ -98,6 +94,73 @@ func (a accessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { } } +// s3ObjectLambdaAccessPointEndpointBuilder represents the endpoint builder for an s3 object lambda access point arn +type s3ObjectLambdaAccessPointEndpointBuilder arn.S3ObjectLambdaAccessPointARN + +// build builds the endpoint for corresponding access point arn +// +// For building an endpoint from access point arn, format used is: +// - Access point endpoint format : {accesspointName}-{accountId}.s3-object-lambda.{region}.{dnsSuffix} +// - example : myaccesspoint-012345678901.s3-object-lambda.us-west-2.amazonaws.com +// +// Access Point Endpoint requests are signed using "s3-object-lambda" as signing name. +// +func (a s3ObjectLambdaAccessPointEndpointBuilder) build(req *request.Request) error { + resolveRegion := arn.S3ObjectLambdaAccessPointARN(a).Region + cfgRegion := aws.StringValue(req.Config.Region) + + if s3shared.IsFIPS(cfgRegion) { + if aws.BoolValue(req.Config.S3UseARNRegion) && s3shared.IsCrossRegion(req, resolveRegion) { + // FIPS with cross region is not supported, the SDK must fail + // because there is no well defined method for SDK to construct a + // correct FIPS endpoint. 
+ return s3shared.NewClientConfiguredForCrossRegionFIPSError(arn.S3ObjectLambdaAccessPointARN(a), + req.ClientInfo.PartitionID, cfgRegion, nil) + } + resolveRegion = cfgRegion + } + + endpoint, err := resolveRegionalEndpoint(req, resolveRegion, EndpointsID) + if err != nil { + return s3shared.NewFailedToResolveEndpointError(arn.S3ObjectLambdaAccessPointARN(a), + req.ClientInfo.PartitionID, cfgRegion, err) + } + + endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL)) + + endpoint.SigningName = s3ObjectsLambdaNamespace + + if !hasCustomEndpoint(req) { + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + return err + } + + updateS3HostPrefixForS3ObjectLambda(req) + } + + protocol.HostPrefixBuilder{ + Prefix: accessPointPrefixTemplate, + LabelsFn: a.hostPrefixLabelValues, + }.Build(req) + + // signer redirection + redirectSigner(req, endpoint.SigningName, endpoint.SigningRegion) + + err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) + if err != nil { + return s3shared.NewInvalidARNError(arn.S3ObjectLambdaAccessPointARN(a), err) + } + + return nil +} + +func (a s3ObjectLambdaAccessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { + return map[string]string{ + accessPointPrefixLabel: arn.S3ObjectLambdaAccessPointARN(a).AccessPointName, + accountIDPrefixLabel: arn.S3ObjectLambdaAccessPointARN(a).AccountID, + } +} + // outpostAccessPointEndpointBuilder represents the Endpoint builder for outpost access point arn. type outpostAccessPointEndpointBuilder arn.OutpostAccessPointARN @@ -114,7 +177,7 @@ func (o outpostAccessPointEndpointBuilder) build(req *request.Request) error { resolveService := o.Service endpointsID := resolveService - if resolveService == "s3-outposts" { + if resolveService == s3OutpostsNamespace { endpointsID = "s3" } @@ -130,11 +193,7 @@ func (o outpostAccessPointEndpointBuilder) build(req *request.Request) error { if err = updateRequestEndpoint(req, endpoint.URL); err != nil { return err } - // add url host as s3-outposts - cfgHost := req.HTTPRequest.URL.Host - if strings.HasPrefix(cfgHost, endpointsID) { - req.HTTPRequest.URL.Host = resolveService + cfgHost[len(endpointsID):] - } + updateHostPrefix(req, endpointsID, resolveService) } protocol.HostPrefixBuilder{ @@ -170,7 +229,6 @@ func resolveRegionalEndpoint(r *request.Request, region string, endpointsID stri } func updateRequestEndpoint(r *request.Request, endpoint string) (err error) { - r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath) if err != nil { return awserr.New(request.ErrCodeSerialization, @@ -185,3 +243,19 @@ func redirectSigner(req *request.Request, signingName string, signingRegion stri req.ClientInfo.SigningName = signingName req.ClientInfo.SigningRegion = signingRegion } + +func updateS3HostForS3AccessPoint(req *request.Request) { + updateHostPrefix(req, "s3", s3AccessPointNamespace) +} + +func updateS3HostPrefixForS3ObjectLambda(req *request.Request) { + updateHostPrefix(req, "s3", s3ObjectsLambdaNamespace) +} + +func updateHostPrefix(req *request.Request, oldEndpointPrefix, newEndpointPrefix string) { + host := req.HTTPRequest.URL.Host + if strings.HasPrefix(host, oldEndpointPrefix) { + // replace service hostlabel oldEndpointPrefix to newEndpointPrefix + req.HTTPRequest.URL.Host = newEndpointPrefix + host[len(oldEndpointPrefix):] + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go index f64b55135ee..6d3e726cf51 100644 
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go @@ -48,13 +48,13 @@ const ( // ErrCodeObjectAlreadyInActiveTierError for service response error code // "ObjectAlreadyInActiveTierError". // - // This operation is not allowed against this storage tier. + // This action is not allowed against this storage tier. ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError" // ErrCodeObjectNotInActiveTierError for service response error code // "ObjectNotInActiveTierError". // - // The source object of the COPY operation is not in the active tier and is - // only stored in Amazon S3 Glacier. + // The source object of the COPY action is not in the active tier and is only + // stored in Amazon S3 Glacier. ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index bfc4372f9fd..17c46378899 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -65,34 +65,6 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // -// You cannot use AWS account root user credentials to call AssumeRole. You -// must use credentials for an IAM user or an IAM role to call AssumeRole. -// -// For cross-account access, imagine that you own multiple accounts and need -// to access resources in each account. You could create long-term credentials -// in each account to access those resources. However, managing all those credentials -// and remembering which one can access which account can be time consuming. -// Instead, you can create one set of long-term credentials in one account. -// Then use temporary security credentials to access all the other accounts -// by assuming roles in those accounts. For more information about roles, see -// IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) -// in the IAM User Guide. -// -// Session Duration -// -// By default, the temporary security credentials created by AssumeRole last -// for one hour. However, you can use the optional DurationSeconds parameter -// to specify the duration of your session. You can provide a value from 900 -// seconds (15 minutes) up to the maximum session duration setting for the role. -// This setting can have a value from 1 hour to 12 hours. To learn how to view -// the maximum value for your role, see View the Maximum Session Duration Setting -// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI commands. However -// the limit does not apply when you use those operations to create a console -// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. 
-// // Permissions // // The temporary security credentials created by AssumeRole can be used to make @@ -102,7 +74,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plain text that you use for both inline +// use as managed session policies. The plaintext that you use for both inline // and managed session policies can't exceed 2,048 characters. Passing policies // to this operation returns new temporary credentials. The resulting session's // permissions are the intersection of the role's identity-based policy and @@ -308,6 +280,15 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) // in the IAM User Guide. // +// Role chaining (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining) +// limits your AWS CLI or AWS API role session to a maximum of one hour. When +// you use the AssumeRole API operation to assume a role, you can specify the +// duration of your role session with the DurationSeconds parameter. You can +// specify a parameter value of up to 43200 seconds (12 hours), depending on +// the maximum session duration setting for your role. However, if you assume +// a role using role chaining and provide a DurationSeconds parameter value +// greater than one hour, the operation fails. +// // Permissions // // The temporary security credentials created by AssumeRoleWithSAML can be used @@ -317,7 +298,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plain text that you use for both inline +// use as managed session policies. The plaintext that you use for both inline // and managed session policies can't exceed 2,048 characters. Passing policies // to this operation returns new temporary credentials. The resulting session's // permissions are the intersection of the role's identity-based policy and @@ -346,16 +327,16 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) // in the IAM User Guide. // -// You can pass up to 50 session tags. The plain text session tag keys can’t +// You can pass up to 50 session tags. The plaintext session tag keys can’t // exceed 128 characters and the values can’t exceed 256 characters. For these // and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // // An AWS conversion compresses the passed session policies and session tags // into a packed binary format that has a separate limit. 
Your request can fail -// for this limit even if your plain text meets the other requirements. The -// PackedPolicySize response element indicates by percentage how close the policies -// and tags for your request are to the upper size limit. +// for this limit even if your plaintext meets the other requirements. The PackedPolicySize +// response element indicates by percentage how close the policies and tags +// for your request are to the upper size limit. // // You can pass a session tag with the same key as a tag that is attached to // the role. When you do, session tags override the role's tags with the same @@ -564,7 +545,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plain text that you use for both inline +// use as managed session policies. The plaintext that you use for both inline // and managed session policies can't exceed 2,048 characters. Passing policies // to this operation returns new temporary credentials. The resulting session's // permissions are the intersection of the role's identity-based policy and @@ -583,16 +564,16 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) // in the IAM User Guide. // -// You can pass up to 50 session tags. The plain text session tag keys can’t +// You can pass up to 50 session tags. The plaintext session tag keys can’t // exceed 128 characters and the values can’t exceed 256 characters. For these // and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // // An AWS conversion compresses the passed session policies and session tags // into a packed binary format that has a separate limit. Your request can fail -// for this limit even if your plain text meets the other requirements. The -// PackedPolicySize response element indicates by percentage how close the policies -// and tags for your request are to the upper size limit. +// for this limit even if your plaintext meets the other requirements. The PackedPolicySize +// response element indicates by percentage how close the policies and tags +// for your request are to the upper size limit. // // You can pass a session tag with the same key as a tag that is attached to // the role. When you do, the session tag overrides the role tag with the same @@ -619,7 +600,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // // Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail // logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) -// of the provided Web Identity Token. We recommend that you avoid using any +// of the provided web identity token. We recommend that you avoid using any // personally identifiable information (PII) in this field. For example, you // could instead use a GUID or a pairwise identifier, as suggested in the OIDC // specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). 
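A brief caller-side sketch, not part of the vendored diff, of the session-tag and duration behaviour the comments above describe: tags and policies count against the packed size limit reported by PackedPolicySize, and DurationSeconds can range from 900 seconds up to the role's configured maximum (one hour when role chaining). The role ARN, session name, and tag values are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"), // placeholder
		RoleSessionName: aws.String("example-session"),
		DurationSeconds: aws.Int64(3600), // 900s up to the role's maximum; capped at 1h when role chaining
		Tags: []*sts.Tag{ // up to 50 tags; keys <=128 chars, values <=256 chars
			{Key: aws.String("Project"), Value: aws.String("example")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// PackedPolicySize reports, as a percentage, how close the compressed
	// policies and tags are to the upper size limit.
	fmt.Println(aws.Int64Value(out.PackedPolicySize), aws.StringValue(out.Credentials.AccessKeyId))
}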
@@ -1108,6 +1089,70 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plaintext that you use for both inline +// and managed session policies can't exceed 2,048 characters. +// +// Though the session policy parameters are optional, if you do not pass a policy, +// then the resulting federated user session has no permissions. When you pass +// session policies, the session permissions are the intersection of the IAM +// user policies and the session policies that you pass. This gives you a way +// to further restrict the permissions for a federated user. You cannot use +// session policies to grant more permissions than those that are defined in +// the permissions policy of the IAM user. For more information, see Session +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. For information about using GetFederationToken to +// create temporary security credentials, see GetFederationToken—Federation +// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +// +// You can use the credentials to access a resource that has a resource-based +// policy. If that policy specifically references the federated user session +// in the Principal element of the policy, the session has the permissions allowed +// by the policy. These permissions are granted in addition to the permissions +// granted by the session policies. +// +// Tags +// +// (Optional) You can pass tag key-value pairs to your session. These are called +// session tags. For more information about session tags, see Passing Session +// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You can create a mobile-based or browser-based app that can authenticate +// users using a web identity provider like Login with Amazon, Facebook, Google, +// or an OpenID Connect-compatible identity provider. In this case, we recommend +// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. +// For more information, see Federation Through a Web-based Identity Provider +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// in the IAM User Guide. +// +// You can also call GetFederationToken using the security credentials of an +// AWS account root user, but we do not recommend it. Instead, we recommend +// that you create an IAM user for the purpose of the proxy application. Then +// attach a policy to the IAM user that limits federated users to only the actions +// and resources that they need to access. For more information, see IAM Best +// Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) +// in the IAM User Guide. +// +// Session duration +// +// The temporary credentials are valid for the specified duration, from 900 +// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default +// session duration is 43,200 seconds (12 hours). 
Temporary credentials that +// are obtained by using AWS account root user credentials have a maximum duration +// of 3,600 seconds (1 hour). +// +// Permissions +// +// You can use the temporary credentials created by GetFederationToken in any +// AWS service except the following: +// +// * You cannot call any IAM operations using the AWS CLI or the AWS API. +// +// * You cannot call any STS operations except GetCallerIdentity. +// +// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to // use as managed session policies. The plain text that you use for both inline // and managed session policies can't exceed 2,048 characters. // @@ -1338,14 +1383,15 @@ func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionToken type AssumeRoleInput struct { _ struct{} `type:"structure"` - // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) up to the maximum session duration setting for the role. - // This setting can have a value from 1 hour to 12 hours. If you specify a value - // higher than this setting, the operation fails. For example, if you specify - // a session duration of 12 hours, but your administrator set the maximum session - // duration to 6 hours, your operation fails. To learn how to view the maximum - // value for your role, see View the Maximum Session Duration Setting for a - // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // The duration, in seconds, of the role session. The value specified can can + // range from 900 seconds (15 minutes) up to the maximum session duration that + // is set for the role. The maximum session duration setting can have a value + // from 1 hour to 12 hours. If you specify a value higher than this setting + // or the administrator setting (whichever is lower), the operation fails. For + // example, if you specify a session duration of 12 hours, but your administrator + // set the maximum session duration to 6 hours, your operation fails. To learn + // how to view the maximum value for your role, see View the Maximum Session + // Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. // // By default, the value is set to 3600 seconds. @@ -1387,17 +1433,17 @@ type AssumeRoleInput struct { // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // - // The plain text that you use for both inline and managed session policies - // can't exceed 2,048 characters. The JSON policy characters can be any ASCII - // character from the space character to the end of the valid character list - // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). 
It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. // // An AWS conversion compresses the passed session policies and session tags // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. + // for this limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags + // for your request are to the upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -1405,16 +1451,16 @@ type AssumeRoleInput struct { // as the role. // // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plain text that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, - // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see Amazon + // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. // // An AWS conversion compresses the passed session policies and session tags // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. + // for this limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags + // for your request are to the upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -1459,22 +1505,41 @@ type AssumeRoleInput struct { // also include underscores or any of the following characters: =,.@- SerialNumber *string `min:"9" type:"string"` + // The source identity specified by the principal that is calling the AssumeRole + // operation. + // + // You can require users to specify a source identity when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. You can use source identity information in AWS CloudTrail logs to + // determine who took actions with a role. You can use the aws:SourceIdentity + // condition key to further control access to AWS resources based on the value + // of source identity. For more information about using source identity, see + // Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@-. 
You cannot + // use a value that begins with the text aws:. This prefix is reserved for AWS + // internal use. + SourceIdentity *string `min:"2" type:"string"` + // A list of session tags that you want to pass. Each session tag consists of // a key name and an associated value. For more information about session tags, // see Tagging AWS STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) // in the IAM User Guide. // - // This parameter is optional. You can pass up to 50 session tags. The plain - // text session tag keys can’t exceed 128 characters, and the values can’t - // exceed 256 characters. For these and additional limits, see IAM and STS Character + // This parameter is optional. You can pass up to 50 session tags. The plaintext + // session tag keys can’t exceed 128 characters, and the values can’t exceed + // 256 characters. For these and additional limits, see IAM and STS Character // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // // An AWS conversion compresses the passed session policies and session tags // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. + // for this limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags + // for your request are to the upper size limit. // // You can pass a session tag with the same key as a tag that is already attached // to the role. When you do, session tags override a role tag with the same @@ -1495,9 +1560,10 @@ type AssumeRoleInput struct { Tags []*Tag `type:"list"` // The value provided by the MFA device, if the trust policy of the role being - // assumed requires MFA (that is, if the policy includes a condition that tests - // for MFA). If the role being assumed requires MFA and if the TokenCode value - // is missing or expired, the AssumeRole call returns an "access denied" error. + // assumed requires MFA. (In other words, if the policy includes a condition + // that tests for MFA). If the role being assumed requires MFA and if the TokenCode + // value is missing or expired, the AssumeRole call returns an "access denied" + // error. // // The format for this parameter, as described by its regex pattern, is a sequence // of six numeric digits. @@ -1554,6 +1620,9 @@ func (s *AssumeRoleInput) Validate() error { if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) } + if s.SourceIdentity != nil && len(*s.SourceIdentity) < 2 { + invalidParams.Add(request.NewErrParamMinLen("SourceIdentity", 2)) + } if s.TokenCode != nil && len(*s.TokenCode) < 6 { invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) } @@ -1626,6 +1695,12 @@ func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { return s } +// SetSourceIdentity sets the SourceIdentity field's value. +func (s *AssumeRoleInput) SetSourceIdentity(v string) *AssumeRoleInput { + s.SourceIdentity = &v + return s +} + // SetTags sets the Tags field's value. 
func (s *AssumeRoleInput) SetTags(v []*Tag) *AssumeRoleInput { s.Tags = v @@ -1668,6 +1743,23 @@ type AssumeRoleOutput struct { // packed size is greater than 100 percent, which means the policies and tags // exceeded the allowed space. PackedPolicySize *int64 `type:"integer"` + + // The source identity specified by the principal that is calling the AssumeRole + // operation. + // + // You can require users to specify a source identity when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. You can use source identity information in AWS CloudTrail logs to + // determine who took actions with a role. You can use the aws:SourceIdentity + // condition key to further control access to AWS resources based on the value + // of source identity. For more information about using source identity, see + // Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SourceIdentity *string `min:"2" type:"string"` } // String returns the string representation @@ -1698,6 +1790,12 @@ func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { return s } +// SetSourceIdentity sets the SourceIdentity field's value. +func (s *AssumeRoleOutput) SetSourceIdentity(v string) *AssumeRoleOutput { + s.SourceIdentity = &v + return s +} + type AssumeRoleWithSAMLInput struct { _ struct{} `type:"structure"` @@ -1736,17 +1834,17 @@ type AssumeRoleWithSAMLInput struct { // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // - // The plain text that you use for both inline and managed session policies - // can't exceed 2,048 characters. The JSON policy characters can be any ASCII - // character from the space character to the end of the valid character list - // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. // // An AWS conversion compresses the passed session policies and session tags // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. + // for this limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags + // for your request are to the upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -1754,16 +1852,16 @@ type AssumeRoleWithSAMLInput struct { // as the role. 
// // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plain text that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, - // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see Amazon + // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. // // An AWS conversion compresses the passed session policies and session tags // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. + // for this limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags + // for your request are to the upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -1786,7 +1884,7 @@ type AssumeRoleWithSAMLInput struct { // RoleArn is a required field RoleArn *string `min:"20" type:"string" required:"true"` - // The base-64 encoded SAML authentication response provided by the IdP. + // The base64 encoded SAML authentication response provided by the IdP. // // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) // in the IAM User Guide. @@ -1908,10 +2006,17 @@ type AssumeRoleWithSAMLOutput struct { // The value of the Issuer element of the SAML assertion. Issuer *string `type:"string"` - // A hash value based on the concatenation of the Issuer response value, the - // AWS account ID, and the friendly name (the last part of the ARN) of the SAML - // provider in IAM. The combination of NameQualifier and Subject can be used - // to uniquely identify a federated user. + // A hash value based on the concatenation of the following: + // + // * The Issuer response value. + // + // * The AWS account ID. + // + // * The friendly name (the last part of the ARN) of the SAML provider in + // IAM. + // + // The combination of NameQualifier and Subject can be used to uniquely identify + // a federated user. // // The following pseudocode shows how the hash value is calculated: // @@ -1925,6 +2030,26 @@ type AssumeRoleWithSAMLOutput struct { // exceeded the allowed space. PackedPolicySize *int64 `type:"integer"` + // The value in the SourceIdentity attribute in the SAML assertion. + // + // You can require users to set a source identity value when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. That way, actions that are taken with the role are associated with + // that user. After the source identity is set, the value cannot be changed. 
+ // It is present in the request for all actions that are taken by the role and + // persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) + // sessions. You can configure your SAML identity provider to use an attribute + // associated with your users, like user name or email, as the source identity + // when calling AssumeRoleWithSAML. You do this by adding an attribute to the + // SAML assertion. For more information about using source identity, see Monitor + // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SourceIdentity *string `min:"2" type:"string"` + // The value of the NameID element in the Subject element of the SAML assertion. Subject *string `type:"string"` @@ -1985,6 +2110,12 @@ func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithS return s } +// SetSourceIdentity sets the SourceIdentity field's value. +func (s *AssumeRoleWithSAMLOutput) SetSourceIdentity(v string) *AssumeRoleWithSAMLOutput { + s.SourceIdentity = &v + return s +} + // SetSubject sets the Subject field's value. func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput { s.Subject = &v @@ -2032,17 +2163,17 @@ type AssumeRoleWithWebIdentityInput struct { // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // - // The plain text that you use for both inline and managed session policies - // can't exceed 2,048 characters. The JSON policy characters can be any ASCII - // character from the space character to the end of the valid character list - // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. // // An AWS conversion compresses the passed session policies and session tags // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. + // for this limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags + // for your request are to the upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -2050,16 +2181,16 @@ type AssumeRoleWithWebIdentityInput struct { // as the role. // // This parameter is optional. You can provide up to 10 managed policy ARNs. 
- // However, the plain text that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, - // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see Amazon + // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. // // An AWS conversion compresses the passed session policies and session tags // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. + // for this limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags + // for your request are to the upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -2242,6 +2373,29 @@ type AssumeRoleWithWebIdentityOutput struct { // in the AssumeRoleWithWebIdentity request. Provider *string `type:"string"` + // The value of the source identity that is returned in the JSON web token (JWT) + // from the identity provider. + // + // You can require users to set a source identity value when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. That way, actions that are taken with the role are associated with + // that user. After the source identity is set, the value cannot be changed. + // It is present in the request for all actions that are taken by the role and + // persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) + // sessions. You can configure your identity provider to use an attribute associated + // with your users, like user name or email, as the source identity when calling + // AssumeRoleWithWebIdentity. You do this by adding a claim to the JSON web + // token. To learn more about OIDC tokens and claims, see Using Tokens with + // User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html) + // in the Amazon Cognito Developer Guide. For more information about using source + // identity, see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SourceIdentity *string `min:"2" type:"string"` + // The unique user identifier that is returned by the identity provider. This // identifier is associated with the WebIdentityToken that was submitted with // the AssumeRoleWithWebIdentity call. 
The identifier is typically unique to @@ -2291,6 +2445,12 @@ func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithW return s } +// SetSourceIdentity sets the SourceIdentity field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetSourceIdentity(v string) *AssumeRoleWithWebIdentityOutput { + s.SourceIdentity = &v + return s +} + // SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value. func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput { s.SubjectFromWebIdentityToken = &v @@ -2682,17 +2842,17 @@ type GetFederationTokenInput struct { // by the policy. These permissions are granted in addition to the permissions // that are granted by the session policies. // - // The plain text that you use for both inline and managed session policies - // can't exceed 2,048 characters. The JSON policy characters can be any ASCII - // character from the space character to the end of the valid character list - // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. // // An AWS conversion compresses the passed session policies and session tags // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. + // for this limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags + // for your request are to the upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -2702,7 +2862,7 @@ type GetFederationTokenInput struct { // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policies to - // use as managed session policies. The plain text that you use for both inline + // use as managed session policies. The plaintext that you use for both inline // and managed session policies can't exceed 2,048 characters. You can provide // up to 10 managed policy ARNs. For more information about ARNs, see Amazon // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) @@ -2727,9 +2887,9 @@ type GetFederationTokenInput struct { // // An AWS conversion compresses the passed session policies and session tags // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. 
+ // for this limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags + // for your request are to the upper size limit. PolicyArns []*PolicyDescriptorType `type:"list"` // A list of session tags. Each session tag consists of a key name and an associated @@ -2737,17 +2897,17 @@ type GetFederationTokenInput struct { // in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) // in the IAM User Guide. // - // This parameter is optional. You can pass up to 50 session tags. The plain - // text session tag keys can’t exceed 128 characters and the values can’t - // exceed 256 characters. For these and additional limits, see IAM and STS Character + // This parameter is optional. You can pass up to 50 session tags. The plaintext + // session tag keys can’t exceed 128 characters and the values can’t exceed + // 256 characters. For these and additional limits, see IAM and STS Character // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // // An AWS conversion compresses the passed session policies and session tags // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. + // for this limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags + // for your request are to the upper size limit. // // You can pass a session tag with the same key as a tag that is already attached // to the user you are federating. When you do, session tags override a user diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go index b5c755e181c..165a8be4b7a 100644 --- a/vendor/github.com/containers/image/v5/copy/copy.go +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -16,6 +16,7 @@ import ( "github.com/containers/image/v5/image" internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/internal/pkg/platform" + internalTypes "github.com/containers/image/v5/internal/types" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/blobinfocache" "github.com/containers/image/v5/pkg/compression" @@ -28,8 +29,8 @@ import ( imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" - "github.com/vbauerster/mpb/v5" - "github.com/vbauerster/mpb/v5/decor" + "github.com/vbauerster/mpb/v6" + "github.com/vbauerster/mpb/v6/decor" "golang.org/x/crypto/ssh/terminal" "golang.org/x/sync/semaphore" ) @@ -46,7 +47,7 @@ var ( // ErrDecryptParamsMissing is returned if there is missing decryption parameters ErrDecryptParamsMissing = errors.New("Necessary DecryptParameters not present") - // maxParallelDownloads is used to limit the maxmimum number of parallel + // maxParallelDownloads is used to limit the maximum number of parallel // downloads. Let's follow Firefox by limiting it to 6. 
maxParallelDownloads = uint(6) ) @@ -108,19 +109,20 @@ func (d *digestingReader) Read(p []byte) (int, error) { // copier allows us to keep track of diffID values for blobs, and other // data shared across one or more images in a possible manifest list. type copier struct { - dest types.ImageDestination - rawSource types.ImageSource - reportWriter io.Writer - progressOutput io.Writer - progressInterval time.Duration - progress chan types.ProgressProperties - blobInfoCache internalblobinfocache.BlobInfoCache2 - copyInParallel bool - compressionFormat compression.Algorithm - compressionLevel *int - ociDecryptConfig *encconfig.DecryptConfig - ociEncryptConfig *encconfig.EncryptConfig - maxParallelDownloads uint + dest types.ImageDestination + rawSource types.ImageSource + reportWriter io.Writer + progressOutput io.Writer + progressInterval time.Duration + progress chan types.ProgressProperties + blobInfoCache internalblobinfocache.BlobInfoCache2 + copyInParallel bool + compressionFormat compression.Algorithm + compressionLevel *int + ociDecryptConfig *encconfig.DecryptConfig + ociEncryptConfig *encconfig.EncryptConfig + maxParallelDownloads uint + downloadForeignLayers bool } // imageCopier tracks state specific to a single image (possibly an item of a manifest list) @@ -194,6 +196,13 @@ type Options struct { OciDecryptConfig *encconfig.DecryptConfig // MaxParallelDownloads indicates the maximum layers to pull at the same time. A reasonable default is used if this is left as 0. MaxParallelDownloads uint + // When OptimizeDestinationImageAlreadyExists is set, optimize the copy assuming that the destination image already + // exists (and is equivalent). Making the eventual (no-op) copy more performant for this case. Enabling the option + // is slightly pessimistic if the destination image doesn't exist, or is not equivalent. + OptimizeDestinationImageAlreadyExists bool + // Download layer contents with "nondistributable" media types ("foreign" layers) and translate the layer media type + // to not indicate "nondistributable". + DownloadForeignLayers bool } // validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value @@ -269,10 +278,11 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, // FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx. // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually // we might want to add a separate CommonCtx — or would that be too confusing? - blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)), - ociDecryptConfig: options.OciDecryptConfig, - ociEncryptConfig: options.OciEncryptConfig, - maxParallelDownloads: options.MaxParallelDownloads, + blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)), + ociDecryptConfig: options.OciDecryptConfig, + ociEncryptConfig: options.OciEncryptConfig, + maxParallelDownloads: options.MaxParallelDownloads, + downloadForeignLayers: options.DownloadForeignLayers, } // Default to using gzip compression unless specified otherwise. 
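A minimal caller-side sketch showing where the two new copy.Options fields would be set; the image references, policy context, and surrounding setup are assumed to exist elsewhere and are not part of the vendored change.

package example

import (
	"context"
	"os"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/types"
)

func copyWithNewOptions(ctx context.Context, pc *signature.PolicyContext, srcRef, destRef types.ImageReference) error {
	_, err := copy.Image(ctx, pc, destRef, srcRef, &copy.Options{
		ReportWriter: os.Stdout,
		// Skip the copy when the destination already holds a manifest with
		// the same digest as the source.
		OptimizeDestinationImageAlreadyExists: true,
		// Fetch "nondistributable" (foreign) layers instead of passing
		// their URLs through, rewriting the layer media types.
		DownloadForeignLayers: true,
	})
	return err
}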
if options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil { @@ -361,6 +371,45 @@ func supportsMultipleImages(dest types.ImageDestination) bool { return false } +// compareImageDestinationManifestEqual compares the `src` and `dest` image manifests (reading the manifest from the +// (possibly remote) destination). Returning true and the destination's manifest, type and digest if they compare equal. +func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src types.Image, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) { + srcManifest, _, err := src.Manifest(ctx) + if err != nil { + return false, nil, "", "", errors.Wrapf(err, "Error reading manifest from image") + } + + srcManifestDigest, err := manifest.Digest(srcManifest) + if err != nil { + return false, nil, "", "", errors.Wrapf(err, "Error calculating manifest digest") + } + + destImageSource, err := dest.Reference().NewImageSource(ctx, options.DestinationCtx) + if err != nil { + logrus.Debugf("Unable to create destination image %s source: %v", dest.Reference(), err) + return false, nil, "", "", nil + } + + destManifest, destManifestType, err := destImageSource.GetManifest(ctx, targetInstance) + if err != nil { + logrus.Debugf("Unable to get destination image %s/%s manifest: %v", destImageSource, targetInstance, err) + return false, nil, "", "", nil + } + + destManifestDigest, err := manifest.Digest(destManifest) + if err != nil { + return false, nil, "", "", errors.Wrapf(err, "Error calculating manifest digest") + } + + logrus.Debugf("Comparing source and destination manifest digests: %v vs. %v", srcManifestDigest, destManifestDigest) + if srcManifestDigest != destManifestDigest { + return false, nil, "", "", nil + } + + // Destination and source manifests, types and digests should all be equivalent + return true, destManifest, destManifestType, destManifestDigest, nil +} + // copyMultipleImages copies some or all of an image list's instances, using // policyContext to validate source image admissibility. func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel *image.UnparsedImage) (copiedManifest []byte, copiedManifestType string, retErr error) { @@ -646,6 +695,26 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli // If encrypted and decryption keys provided, we should try to decrypt ic.diffIDsAreNeeded = ic.diffIDsAreNeeded || (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || ic.c.ociEncryptConfig != nil + // If enabled, fetch and compare the destination's manifest. 
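A small sketch of the equality test the new compareImageDestinationManifestEqual helper boils down to: two manifests count as equivalent only when their canonical digests match, so format conversions or re-serializations defeat the optimization. The function name here is illustrative, not part of the vendored code.

package example

import "github.com/containers/image/v5/manifest"

func manifestsEquivalent(src, dst []byte) (bool, error) {
	srcDigest, err := manifest.Digest(src)
	if err != nil {
		return false, err
	}
	dstDigest, err := manifest.Digest(dst)
	if err != nil {
		return false, err
	}
	// Byte-for-byte identical manifests hash to the same digest; anything
	// else, including semantically equal but re-encoded manifests, does not.
	return srcDigest == dstDigest, nil
}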
And as an optimization skip updating the destination iff equal + if options.OptimizeDestinationImageAlreadyExists { + shouldUpdateSigs := len(sigs) > 0 || options.SignBy != "" // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible + noPendingManifestUpdates := ic.noPendingManifestUpdates() + + logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates) + if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates { + isSrcDestManifestEqual, retManifest, retManifestType, retManifestDigest, err := compareImageDestinationManifestEqual(ctx, options, src, targetInstance, c.dest) + if err != nil { + logrus.Warnf("Failed to compare destination image manifest: %v", err) + return nil, "", "", err + } + + if isSrcDestManifestEqual { + c.Printf("Skipping: image already present at destination\n") + return retManifest, retManifestType, retManifestDigest, nil + } + } + } + if err := ic.copyLayers(ctx); err != nil { return nil, "", "", err } @@ -702,6 +771,9 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli return nil, "", "", fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", ")) } } + if targetInstance != nil { + targetInstance = &retManifestDigest + } if options.SignBy != "" { newSig, err := c.createSignature(manifestBytes, options.SignBy) @@ -781,6 +853,10 @@ func (ic *imageCopier) updateEmbeddedDockerReference() error { return nil } +func (ic *imageCopier) noPendingManifestUpdates() bool { + return reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly}) +} + // isTTY returns true if the io.Writer is a file and a tty. func isTTY(w io.Writer) bool { if f, ok := w.(*os.File); ok { @@ -834,7 +910,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { defer copySemaphore.Release(1) defer copyGroup.Done() cld := copyLayerData{} - if ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { + if !ic.c.downloadForeignLayers && ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { // DiffIDs are, currently, needed only when converting from schema1. // In which case src.LayerInfos will not have URLs because schema1 // does not support them. @@ -845,7 +921,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name()) } } else { - cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, toEncrypt, pool) + cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, toEncrypt, pool, index) } data[index] = cld } @@ -901,6 +977,8 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { diffIDs[i] = cld.diffID } + // WARNING: If you are adding new reasons to change ic.manifestUpdates, also update the + // OptimizeDestinationImageAlreadyExists short-circuit conditions ic.manifestUpdates.InformationOnly.LayerInfos = destInfos if ic.diffIDsAreNeeded { ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs @@ -929,7 +1007,7 @@ func layerDigestsDiffer(a, b []types.BlobInfo) bool { // and its digest. 
func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) { pendingImage := ic.src - if !reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly}) { + if !ic.noPendingManifestUpdates() { if !ic.canModifyManifest { return nil, "", errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden") } @@ -1012,10 +1090,9 @@ func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind ), ) } else { - bar = pool.AddSpinner(info.Size, - mpb.SpinnerOnLeft, + bar = pool.Add(0, + mpb.NewSpinnerFiller([]string{".", "..", "...", "....", ""}, mpb.SpinnerOnLeft), mpb.BarFillerClearOnComplete(), - mpb.SpinnerStyle([]string{".", "..", "...", "....", ""}), mpb.PrependDecorators( decor.OnComplete(decor.Name(prefix), onComplete), ), @@ -1040,7 +1117,7 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error { progressPool, progressCleanup := c.newProgressPool(ctx) defer progressCleanup() bar := c.createProgressBar(progressPool, srcInfo, "config", "done") - destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar) + destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar, -1) if err != nil { return types.BlobInfo{}, err } @@ -1066,7 +1143,27 @@ type diffIDResult struct { // copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it, // and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded -func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) { +func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress, layerIndex int) (types.BlobInfo, digest.Digest, error) { + // If the srcInfo doesn't contain compression information, try to compute it from the + // MediaType, which was either read from a manifest by way of LayerInfos() or constructed + // by LayerInfosForCopy(), if it was supplied at all. If we succeed in copying the blob, + // the BlobInfo we return will be passed to UpdatedImage() and then to UpdateLayerInfos(), + // which uses the compression information to compute the updated MediaType values. + // (Sadly UpdatedImage() is documented to not update MediaTypes from + // ManifestUpdateOptions.LayerInfos[].MediaType, so we are doing it indirectly.) + // + // This MIME type → compression mapping belongs in manifest-specific code in our manifest + // package (but we should preferably replace/change UpdatedImage instead of productizing + // this workaround). 
+ if srcInfo.CompressionAlgorithm == nil { + switch srcInfo.MediaType { + case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip: + srcInfo.CompressionAlgorithm = &compression.Gzip + case imgspecv1.MediaTypeImageLayerZstd: + srcInfo.CompressionAlgorithm = &compression.Zstd + } + } + cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be "" // Diffs are needed if we are encrypting an image or trying to decrypt an image diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" || toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil) @@ -1079,7 +1176,26 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob. // Fixing that will probably require passing more information to TryReusingBlob() than the current version of // the ImageDestination interface lets us pass in. - reused, blobInfo, err := ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs) + var ( + blobInfo types.BlobInfo + reused bool + err error + ) + // Note: the storage destination optimizes the committing of + // layers which requires passing the index of the layer. + // Hence, we need to special case and cast. + dest, ok := ic.c.dest.(internalTypes.ImageDestinationWithOptions) + if ok { + options := internalTypes.TryReusingBlobOptions{ + Cache: ic.c.blobInfoCache, + CanSubstitute: ic.canSubstituteBlobs, + LayerIndex: &layerIndex, + } + reused, blobInfo, err = dest.TryReusingBlobWithOptions(ctx, srcInfo, options) + } else { + reused, blobInfo, err = ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs) + } + if err != nil { return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest) } @@ -1095,6 +1211,19 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to Artifact: srcInfo, } } + + // If the reused blob has the same digest as the one we asked for, but + // the transport didn't/couldn't supply compression info, fill it in based + // on what we know from the srcInfos we were given. + // If the srcInfos came from LayerInfosForCopy(), then UpdatedImage() will + // call UpdateLayerInfos(), which uses this information to compute the + // MediaType value for the updated layer infos, and it the transport + // didn't pass the information along from its input to its output, then + // it can derive the MediaType incorrectly. 
+ if blobInfo.Digest == srcInfo.Digest && blobInfo.CompressionAlgorithm == nil { + blobInfo.CompressionOperation = srcInfo.CompressionOperation + blobInfo.CompressionAlgorithm = srcInfo.CompressionAlgorithm + } return blobInfo, cachedDiffID, nil } } @@ -1108,7 +1237,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to bar := ic.c.createProgressBar(pool, srcInfo, "blob", "done") - blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar) + blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex) if err != nil { return types.BlobInfo{}, "", err } @@ -1139,7 +1268,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // perhaps (de/re/)compressing the stream, // and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller. func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, - diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar) (types.BlobInfo, <-chan diffIDResult, error) { + diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar, layerIndex int) (types.BlobInfo, <-chan diffIDResult, error) { var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil var diffIDChan chan diffIDResult @@ -1164,7 +1293,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea } } - blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, toEncrypt, bar) // Sets err to nil on success + blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, toEncrypt, bar, layerIndex) // Sets err to nil on success return blobInfo, diffIDChan, err // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan } @@ -1216,7 +1345,7 @@ func (r errorAnnotationReader) Read(b []byte) (n int, err error) { // and returns a complete blobInfo of the copied blob. func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer, - canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar) (types.BlobInfo, error) { + canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar, layerIndex int) (types.BlobInfo, error) { if isConfig { // This is guaranteed by the caller, but set it here to be explicit. canModifyBlob = false } @@ -1234,8 +1363,9 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr if err != nil { return types.BlobInfo{}, errors.Wrapf(err, "Error preparing to verify blob %s", srcInfo.Digest) } - var destStream io.Reader = digestingReader + + // === Decrypt the stream, if required. 
var decrypted bool if isOciEncrypted(srcInfo.MediaType) && c.ociDecryptConfig != nil { newDesc := imgspecv1.Descriptor{ @@ -1265,12 +1395,13 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) } isCompressed := decompressor != nil - destStream = bar.ProxyReader(destStream) - if expectedCompressionFormat, known := expectedCompressionFormats[srcInfo.MediaType]; known && isCompressed && compressionFormat.Name() != expectedCompressionFormat.Name() { logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedCompressionFormat.Name(), compressionFormat.Name()) } + // === Update progress bars + destStream = bar.ProxyReader(destStream) + // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. if getOriginalLayerCopyWriter != nil { @@ -1279,6 +1410,8 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr } // === Deal with layer compression/decompression if necessary + // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists + // short-circuit conditions var inputInfo types.BlobInfo var compressionOperation types.LayerCompression uploadCompressionFormat := &c.compressionFormat @@ -1349,10 +1482,18 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr compressionOperation = types.PreserveOriginal inputInfo = srcInfo uploadCompressorName = srcCompressorName - uploadCompressionFormat = nil + // Remember if the original blob was compressed, and if so how, so that if + // LayerInfosForCopy() returned something that differs from what was in the + // source's manifest, and UpdatedImage() needs to call UpdateLayerInfos(), + // it will be able to correctly derive the MediaType for the copied blob. + if isCompressed { + uploadCompressionFormat = &compressionFormat + } else { + uploadCompressionFormat = nil + } } - // Perform image encryption for valid mediatypes if ociEncryptConfig provided + // === Encrypt the stream for valid mediatypes if ociEncryptConfig provided var ( encrypted bool finalizer ocicrypt.EncryptLayerFinalizer @@ -1400,7 +1541,23 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr } // === Finally, send the layer stream to dest. - uploadedInfo, err := c.dest.PutBlob(ctx, &errorAnnotationReader{destStream}, inputInfo, c.blobInfoCache, isConfig) + var uploadedInfo types.BlobInfo + // Note: the storage destination optimizes the committing of layers + // which requires passing the index of the layer. Hence, we need to + // special case and cast. 
+ dest, ok := c.dest.(internalTypes.ImageDestinationWithOptions) + if ok { + options := internalTypes.PutBlobOptions{ + Cache: c.blobInfoCache, + IsConfig: isConfig, + } + if !isConfig { + options.LayerIndex = &layerIndex + } + uploadedInfo, err = dest.PutBlobWithOptions(ctx, &errorAnnotationReader{destStream}, inputInfo, options) + } else { + uploadedInfo, err = c.dest.PutBlob(ctx, &errorAnnotationReader{destStream}, inputInfo, c.blobInfoCache, isConfig) + } if err != nil { return types.BlobInfo{}, errors.Wrap(err, "Error writing blob") } diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index be46508de6e..a9533ea39a4 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -502,6 +502,8 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url attempts == backoffNumIterations { return res, err } + // close response body before retry or context done + res.Body.Close() delay = parseRetryAfter(res, delay) if delay > backoffMaxDelay { diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go index 0c1cee0d32c..f9fe4e8a373 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image.go @@ -139,6 +139,7 @@ func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageRef return "", err } + defer res.Body.Close() if res.StatusCode != http.StatusOK { return "", errors.Wrapf(registryHTTPResponseToError(res), "Error reading digest %s in %s", tagOrDigest, dr.ref.Name()) } diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index 842dcfba68b..e11084dc8ab 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -445,7 +445,7 @@ func successStatus(status int) bool { return status >= 200 && status <= 399 } -// isManifestInvalidError returns true iff err from client.HandleErrorReponse is a “manifest invalid” error. +// isManifestInvalidError returns true iff err from client.HandleErrorResponse is a “manifest invalid” error. 
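A generic sketch of the pattern behind the res.Body.Close() fixes above (not the vendored functions themselves): whenever a response is abandoned before its body is read, the body must be closed or the underlying connection leaks. The example assumes a request that can safely be re-sent, such as a GET without a body.

package example

import (
	"errors"
	"net/http"
	"time"
)

func doWithRetry(client *http.Client, req *http.Request, attempts int, delay time.Duration) (*http.Response, error) {
	for i := 0; i < attempts; i++ {
		resp, err := client.Do(req)
		if err != nil {
			return nil, err
		}
		if resp.StatusCode < 500 || i == attempts-1 {
			return resp, nil // caller closes the body of the returned response
		}
		resp.Body.Close() // close the abandoned response before retrying
		time.Sleep(delay)
	}
	return nil, errors.New("no attempts made")
}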
func isManifestInvalidError(err error) bool { errors, ok := err.(errcode.Errors) if !ok || len(errors) == 0 { diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go index bff950bb02d..6916b7dad90 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -251,6 +251,7 @@ func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string) if resp.StatusCode != http.StatusOK { err = errors.Errorf("error fetching external blob from %q: %d (%s)", url, resp.StatusCode, http.StatusText(resp.StatusCode)) logrus.Debug(err) + resp.Body.Close() continue } break @@ -290,6 +291,7 @@ func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca return nil, 0, err } if err := httpResponseToError(res, "Error fetching blob"); err != nil { + res.Body.Close() return nil, 0, err } cache.RecordKnownLocation(s.physicalRef.Transport(), bicTransportScope(s.physicalRef), info.Digest, newBICLocationReference(s.physicalRef)) diff --git a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go index 5ea542bcfec..3e81e06c0cc 100644 --- a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go +++ b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go @@ -123,14 +123,6 @@ var compatibility = map[string][]string{ "arm64": {"v8"}, } -// baseVariants contains, for a specified architecture, a variant that is known to be -// supported by _all_ machines using that architecture. -// Architectures that don’t have variants, or where there are possible versions without -// an established variant name, should not have an entry here. -var baseVariants = map[string]string{ - "arm64": "v8", -} - // WantedPlatforms returns all compatible platforms with the platform specifics possibly overridden by user, // the most compatible platform is first. // If some option (arch, os, variant) is not present, a value from current platform is detected. @@ -158,6 +150,8 @@ func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) { var variants []string = nil if wantedVariant != "" { + // If the user requested a specific variant, we'll walk down + // the list from most to least compatible. if compatibility[wantedArch] != nil { variantOrder := compatibility[wantedArch] for i, v := range variantOrder { @@ -171,12 +165,14 @@ func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) { // user wants a variant which we know nothing about - not even compatibility variants = []string{wantedVariant} } + // Make sure to have a candidate with an empty variant as well. variants = append(variants, "") } else { - variants = append(variants, "") // No variant specified, use a “no variant specified” image if present - if baseVariant, ok := baseVariants[wantedArch]; ok { - // But also accept an image with the “base” variant for the architecture, if it exists. - variants = append(variants, baseVariant) + // Make sure to have a candidate with an empty variant as well. + variants = append(variants, "") + // If available add the entire compatibility matrix for the specific architecture. + if possibleVariants, ok := compatibility[wantedArch]; ok { + variants = append(variants, possibleVariants...) 
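// Illustrative note (not part of the upstream change): with this rewrite the
// candidate platform variant order becomes, for example,
//   arch=arm,   variant v7 requested   -> v7, v6, v5, ""
//   arch=arm64, no variant requested   -> "", v8
// so the removed baseVariants special case for arm64 is now covered by
// walking the full compatibility list for the architecture.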
} } diff --git a/vendor/github.com/containers/image/v5/internal/types/types.go b/vendor/github.com/containers/image/v5/internal/types/types.go new file mode 100644 index 00000000000..9adf0d536af --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/types/types.go @@ -0,0 +1,53 @@ +package types + +import ( + "context" + "io" + + publicTypes "github.com/containers/image/v5/types" +) + +// ImageDestinationWithOptions is an internal extension to the ImageDestination +// interface. +type ImageDestinationWithOptions interface { + publicTypes.ImageDestination + + // PutBlobWithOptions is a wrapper around PutBlob. If + // options.LayerIndex is set, the blob will be committed directly. + // Either by the calling goroutine or by another goroutine already + // committing layers. + // + // Please note that TryReusingBlobWithOptions and PutBlobWithOptions + // *must* be used the together. Mixing the two with non "WithOptions" + // functions is not supported. + PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo publicTypes.BlobInfo, options PutBlobOptions) (publicTypes.BlobInfo, error) + + // TryReusingBlobWithOptions is a wrapper around TryReusingBlob. If + // options.LayerIndex is set, the reused blob will be recoreded as + // already pulled. + // + // Please note that TryReusingBlobWithOptions and PutBlobWithOptions + // *must* be used the together. Mixing the two with non "WithOptions" + // functions is not supported. + TryReusingBlobWithOptions(ctx context.Context, blobinfo publicTypes.BlobInfo, options TryReusingBlobOptions) (bool, publicTypes.BlobInfo, error) +} + +// PutBlobOptions are used in PutBlobWithOptions. +type PutBlobOptions struct { + // Cache to look up blob infos. + Cache publicTypes.BlobInfoCache + // Denotes whether the blob is a config or not. + IsConfig bool + // The corresponding index in the layer slice. + LayerIndex *int +} + +// TryReusingBlobOptions are used in TryReusingBlobWithOptions. +type TryReusingBlobOptions struct { + // Cache to look up blob infos. + Cache publicTypes.BlobInfoCache + // Use an equivalent of the desired blob. + CanSubstitute bool + // The corresponding index in the layer slice. + LayerIndex *int +} diff --git a/vendor/github.com/containers/image/v5/manifest/manifest.go b/vendor/github.com/containers/image/v5/manifest/manifest.go index 7b07588730d..32680e09d4e 100644 --- a/vendor/github.com/containers/image/v5/manifest/manifest.go +++ b/vendor/github.com/containers/image/v5/manifest/manifest.go @@ -30,7 +30,7 @@ const ( DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json" // DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers. DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar" - // DockerV2Schema2ForeignLayerMediaType is the MIME type used for gzippped schema 2 foreign layers. + // DockerV2Schema2ForeignLayerMediaType is the MIME type used for gzipped schema 2 foreign layers. 
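A condensed sketch of the optional-interface pattern this new internal package enables; it mirrors what copy.go does above and, living under internal/, is only usable from inside the containers/image module. The helper function name is illustrative.

package example

import (
	"context"
	"io"

	internalTypes "github.com/containers/image/v5/internal/types"
	publicTypes "github.com/containers/image/v5/types"
)

func putLayerBlob(ctx context.Context, dest publicTypes.ImageDestination, stream io.Reader, info publicTypes.BlobInfo, cache publicTypes.BlobInfoCache, layerIndex int) (publicTypes.BlobInfo, error) {
	// Destinations that implement the extended interface (per the comments
	// above, the storage destination) receive the layer index so they can
	// commit layers as they arrive; everything else falls back to PutBlob.
	if d, ok := dest.(internalTypes.ImageDestinationWithOptions); ok {
		return d.PutBlobWithOptions(ctx, stream, info, internalTypes.PutBlobOptions{
			Cache:      cache,
			LayerIndex: &layerIndex, // IsConfig stays false: this is a layer
		})
	}
	return dest.PutBlob(ctx, stream, info, cache, false)
}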
DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" ) @@ -132,9 +132,16 @@ func GuessMIMEType(manifest []byte) string { if err := json.Unmarshal(manifest, &ociMan); err != nil { return "" } - if ociMan.Config.MediaType == imgspecv1.MediaTypeImageConfig { + switch ociMan.Config.MediaType { + case imgspecv1.MediaTypeImageConfig: return imgspecv1.MediaTypeImageManifest + case DockerV2Schema2ConfigMediaType: + // This case should not happen since a Docker image + // must declare a top-level media type and + // `meta.MediaType` has already been checked. + return DockerV2Schema2MediaType } + // Maybe an image index or an OCI artifact. ociIndex := struct { Manifests []imgspecv1.Descriptor `json:"manifests"` }{} @@ -145,9 +152,13 @@ func GuessMIMEType(manifest []byte) string { if ociMan.Config.MediaType == "" { return imgspecv1.MediaTypeImageIndex } + // FIXME: this is mixing media types of manifests and configs. return ociMan.Config.MediaType } - return DockerV2Schema2MediaType + // It's most likely an OCI artifact with a custom config media + // type which is not (and cannot) be covered by the media-type + // checks cabove. + return imgspecv1.MediaTypeImageManifest } return "" } diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go index 2c211b8b84a..a472efd95bf 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go @@ -282,7 +282,7 @@ func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope type }) // FIXME? Log error (but throttle the log volume on repeated accesses)? } -// appendReplacementCandiates creates prioritize.CandidateWithTime values for digest in scopeBucket with corresponding compression info from compressionBucket (if compressionBucket is not nil), and returns the result of appending them to candidates. +// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in scopeBucket with corresponding compression info from compressionBucket (if compressionBucket is not nil), and returns the result of appending them to candidates. func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket, compressionBucket *bolt.Bucket, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime { digestKey := []byte(digest.String()) b := scopeBucket.Bucket(digestKey) @@ -321,7 +321,7 @@ func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW // CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused // within the specified (transport scope) (if they still exist, which is not guaranteed). // -// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, +// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same // uncompressed digest. 
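A hedged usage sketch of the GuessMIMEType behaviour changed above: the function only inspects the manifest bytes, and with this change an OCI artifact whose config carries a custom media type is reported as an OCI image manifest rather than a Docker schema 2 manifest.

package example

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func describeManifest(raw []byte) {
	switch mt := manifest.GuessMIMEType(raw); mt {
	case imgspecv1.MediaTypeImageManifest:
		fmt.Println("OCI image manifest (possibly an OCI artifact)")
	case manifest.DockerV2Schema2MediaType:
		fmt.Println("Docker schema 2 manifest")
	case "":
		fmt.Println("not recognized as a manifest")
	default:
		fmt.Println("other manifest media type:", mt)
	}
}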
func (bdc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 { diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go index 01abb8d1e97..83034b618d9 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go @@ -20,7 +20,7 @@ const ( systemBlobInfoCacheDir = "/var/lib/containers/cache" ) -// blobInfoCacheDir returns a path to a blob info cache appropripate for sys and euid. +// blobInfoCacheDir returns a path to a blob info cache appropriate for sys and euid. // euid is used so that (sudo …) does not write root-owned files into the unprivileged users’ home directory. func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) { if sys != nil && sys.BlobInfoCacheDir != "" { diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go index 3d598057e35..426640366fc 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go @@ -120,7 +120,7 @@ func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compresso mem.compressors[blobDigest] = compressorName } -// appendReplacementCandiates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates. +// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates. func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime { locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present for l, t := range locations { @@ -146,7 +146,7 @@ func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateW // CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused // within the specified (transport scope) (if they still exist, which is not guaranteed). // -// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, +// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same // uncompressed digest. 
func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go index 2a54ff312e4..4b7122f9285 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go @@ -42,7 +42,7 @@ func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.B // CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused // within the specified (transport scope) (if they still exist, which is not guaranteed). // -// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, +// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same // uncompressed digest. func (noCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go index 983df41d825..b84aac6e44f 100644 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go @@ -6,14 +6,17 @@ import ( "fmt" "io/ioutil" "os" + "os/exec" "path/filepath" "runtime" "strings" + "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/homedir" helperclient "github.com/docker/docker-credential-helpers/client" "github.com/docker/docker-credential-helpers/credentials" + "github.com/hashicorp/go-multierror" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -41,12 +44,6 @@ var ( dockerLegacyHomePath = ".dockercfg" nonLinuxAuthFilePath = filepath.FromSlash(".config/containers/auth.json") - // Note that the keyring support has been disabled as it was causing - // regressions. Before enabling, please revisit TODO(keyring) comments - // which need to be addressed if the need remerged to support the - // kernel keyring. - enableKeyring = false - // ErrNotLoggedIn is returned for users not logged into a registry // that they are trying to logout of ErrNotLoggedIn = errors.New("not logged in") @@ -54,72 +51,114 @@ var ( ErrNotSupported = errors.New("not supported") ) -// SetAuthentication stores the username and password in the auth.json file +// SetAuthentication stores the username and password in the credential helper or file func SetAuthentication(sys *types.SystemContext, registry, username, password string) error { - return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) { - if ch, exists := auths.CredHelpers[registry]; exists { - return false, setAuthToCredHelper(ch, registry, username, password) - } + helpers, err := sysregistriesv2.CredentialHelpers(sys) + if err != nil { + return err + } - // Set the credentials to kernel keyring if enableKeyring is true. - // The keyring might not work in all environments (e.g., missing capability) and isn't supported on all platforms. 
- // Hence, we want to fall-back to using the authfile in case the keyring failed. - // However, if the enableKeyring is false, we want adhere to the user specification and not use the keyring. - if enableKeyring { - err := setAuthToKernelKeyring(registry, username, password) - if err == nil { - logrus.Debugf("credentials for (%s, %s) were stored in the kernel keyring\n", registry, username) - return false, nil - } - logrus.Debugf("failed to authenticate with the kernel keyring, falling back to authfiles. %v", err) + // Make sure to collect all errors. + var multiErr error + for _, helper := range helpers { + var err error + switch helper { + // Special-case the built-in helpers for auth files. + case sysregistriesv2.AuthenticationFileHelper: + err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) { + if ch, exists := auths.CredHelpers[registry]; exists { + return false, setAuthToCredHelper(ch, registry, username, password) + } + creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) + newCreds := dockerAuthConfig{Auth: creds} + auths.AuthConfigs[registry] = newCreds + return true, nil + }) + // External helpers. + default: + err = setAuthToCredHelper(helper, registry, username, password) + } + if err != nil { + multiErr = multierror.Append(multiErr, err) + logrus.Debugf("Error storing credentials for %s in credential helper %s: %v", registry, helper, err) + continue } - creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) - newCreds := dockerAuthConfig{Auth: creds} - auths.AuthConfigs[registry] = newCreds - return true, nil - }) + logrus.Debugf("Stored credentials for %s in credential helper %s", registry, helper) + return nil + } + return multiErr } // GetAllCredentials returns the registry credentials for all registries stored -// in either the auth.json file or the docker/config.json. +// in any of the configured credential helpers. func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthConfig, error) { - // Note: we need to read the auth files in the inverse order to prevent - // a priority inversion when writing to the map. - authConfigs := make(map[string]types.DockerAuthConfig) - paths := getAuthFilePaths(sys, homedir.Get()) - for i := len(paths) - 1; i >= 0; i-- { - path := paths[i] - // readJSONFile returns an empty map in case the path doesn't exist. - auths, err := readJSONFile(path.path, path.legacyFormat) - if err != nil { - return nil, errors.Wrapf(err, "error reading JSON file %q", path.path) - } + // To keep things simple, let's first extract all registries from all + // possible sources, and then call `GetCredentials` on them. That + // prevents us from having to reverse engineer the logic in + // `GetCredentials`. + allRegistries := make(map[string]bool) + addRegistry := func(s string) { + allRegistries[s] = true + } - for registry, data := range auths.AuthConfigs { - conf, err := decodeDockerAuth(data) - if err != nil { - return nil, err + helpers, err := sysregistriesv2.CredentialHelpers(sys) + if err != nil { + return nil, err + } + for _, helper := range helpers { + switch helper { + // Special-case the built-in helper for auth files. + case sysregistriesv2.AuthenticationFileHelper: + for _, path := range getAuthFilePaths(sys, homedir.Get()) { + // readJSONFile returns an empty map in case the path doesn't exist. 
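A caller-side sketch of the rewritten GetAllCredentials flow described above: registries are collected from every configured credential helper plus the auth files, then each one is resolved through GetCredentials. Nothing here is assumed beyond the public API; the function name is illustrative.

package example

import (
	"fmt"

	"github.com/containers/image/v5/pkg/docker/config"
	"github.com/containers/image/v5/types"
)

func listStoredLogins(sys *types.SystemContext) error {
	creds, err := config.GetAllCredentials(sys)
	if err != nil {
		return err
	}
	for registry, auth := range creds {
		fmt.Printf("%s: user=%q identity-token-set=%t\n", registry, auth.Username, auth.IdentityToken != "")
	}
	return nil
}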
+ auths, err := readJSONFile(path.path, path.legacyFormat) + if err != nil { + return nil, errors.Wrapf(err, "error reading JSON file %q", path.path) + } + // Credential helpers in the auth file have a + // direct mapping to a registry, so we can just + // walk the map. + for registry := range auths.CredHelpers { + addRegistry(registry) + } + for registry := range auths.AuthConfigs { + addRegistry(registry) + } } - authConfigs[normalizeRegistry(registry)] = conf - } - - // Credential helpers may override credentials from the auth file. - for registry, credHelper := range auths.CredHelpers { - username, password, err := getAuthFromCredHelper(credHelper, registry) + // External helpers. + default: + creds, err := listAuthsFromCredHelper(helper) if err != nil { - if credentials.IsErrCredentialsNotFoundMessage(err.Error()) { - continue + logrus.Debugf("Error listing credentials stored in credential helper %s: %v", helper, err) + } + switch errors.Cause(err) { + case nil: + for registry := range creds { + addRegistry(registry) } + case exec.ErrNotFound: + // It's okay if the helper doesn't exist. + default: return nil, err } - - conf := types.DockerAuthConfig{Username: username, Password: password} - authConfigs[normalizeRegistry(registry)] = conf } } - // TODO(keyring): if we ever re-enable the keyring support, we had to - // query all credentials from the keyring here. + // Now use `GetCredentials` to the specific auth configs for each + // previously listed registry. + authConfigs := make(map[string]types.DockerAuthConfig) + for registry := range allRegistries { + authConf, err := GetCredentials(sys, registry) + if err != nil { + if credentials.IsErrCredentialsNotFoundMessage(err.Error()) { + // Ignore if the credentials could not be found (anymore). + continue + } + // Note: we rely on the logging in `GetCredentials`. + return nil, err + } + authConfigs[registry] = authConf + } return authConfigs, nil } @@ -159,7 +198,9 @@ func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath { return paths } -// GetCredentials returns the registry credentials stored in either auth.json +// GetCredentials returns the registry credentials stored in the +// registry-specific credential helpers or in the default global credentials +// helpers with falling back to using either auth.json // file or .docker/config.json, including support for OAuth2 and IdentityToken. // If an entry is not found, an empty struct is returned. func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuthConfig, error) { @@ -170,41 +211,65 @@ func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuth // it exists only to allow testing it with an artificial home directory. func getCredentialsWithHomeDir(sys *types.SystemContext, registry, homeDir string) (types.DockerAuthConfig, error) { if sys != nil && sys.DockerAuthConfig != nil { - logrus.Debug("Returning credentials from DockerAuthConfig") + logrus.Debugf("Returning credentials for %s from DockerAuthConfig", registry) return *sys.DockerAuthConfig, nil } - if enableKeyring { - username, password, err := getAuthFromKernelKeyring(registry) - if err == nil { - logrus.Debug("returning credentials from kernel keyring") - return types.DockerAuthConfig{ - Username: username, - Password: password, - }, nil + // Anonymous function to query credentials from auth files. 
+ getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, error) { + for _, path := range getAuthFilePaths(sys, homeDir) { + authConfig, err := findAuthentication(registry, path.path, path.legacyFormat) + if err != nil { + return types.DockerAuthConfig{}, err + } + + if (authConfig.Username != "" && authConfig.Password != "") || authConfig.IdentityToken != "" { + return authConfig, nil + } } + return types.DockerAuthConfig{}, nil + } + + helpers, err := sysregistriesv2.CredentialHelpers(sys) + if err != nil { + return types.DockerAuthConfig{}, err } - for _, path := range getAuthFilePaths(sys, homeDir) { - authConfig, err := findAuthentication(registry, path.path, path.legacyFormat) + var multiErr error + for _, helper := range helpers { + var creds types.DockerAuthConfig + var err error + switch helper { + // Special-case the built-in helper for auth files. + case sysregistriesv2.AuthenticationFileHelper: + creds, err = getCredentialsFromAuthFiles() + // External helpers. + default: + creds, err = getAuthFromCredHelper(helper, registry) + } if err != nil { - logrus.Debugf("Credentials not found") - return types.DockerAuthConfig{}, err + logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", registry, helper, err) + multiErr = multierror.Append(multiErr, err) + continue } - - if (authConfig.Username != "" && authConfig.Password != "") || authConfig.IdentityToken != "" { - logrus.Debugf("Returning credentials from %s", path.path) - return authConfig, nil + if len(creds.Username)+len(creds.Password)+len(creds.IdentityToken) == 0 { + continue } + logrus.Debugf("Found credentials for %s in credential helper %s", registry, helper) + return creds, nil + } + if multiErr != nil { + return types.DockerAuthConfig{}, multiErr } - logrus.Debugf("Credentials not found") + logrus.Debugf("No credentials for %s found", registry) return types.DockerAuthConfig{}, nil } -// GetAuthentication returns the registry credentials stored in -// either auth.json file or .docker/config.json -// If an entry is not found empty strings are returned for the username and password +// GetAuthentication returns the registry credentials stored in the +// registry-specific credential helpers or in the default global credentials +// helpers with falling back to using either auth.json file or +// .docker/config.json // // Deprecated: This API only has support for username and password. To get the // support for oauth2 in docker registry authentication, we added the new @@ -227,53 +292,132 @@ func getAuthenticationWithHomeDir(sys *types.SystemContext, registry, homeDir st return auth.Username, auth.Password, nil } -// RemoveAuthentication deletes the credentials stored in auth.json +// RemoveAuthentication removes credentials for `registry` from all possible +// sources such as credential helpers and auth files. func RemoveAuthentication(sys *types.SystemContext, registry string) error { - return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) { - // First try cred helpers. 
- if ch, exists := auths.CredHelpers[registry]; exists { - return false, deleteAuthFromCredHelper(ch, registry) + helpers, err := sysregistriesv2.CredentialHelpers(sys) + if err != nil { + return err + } + + var multiErr error + isLoggedIn := false + + removeFromCredHelper := func(helper string) { + err := deleteAuthFromCredHelper(helper, registry) + if err == nil { + logrus.Debugf("Credentials for %q were deleted from credential helper %s", registry, helper) + isLoggedIn = true + return + } + if credentials.IsErrCredentialsNotFoundMessage(err.Error()) { + logrus.Debugf("Not logged in to %s with credential helper %s", registry, helper) + return } + multiErr = multierror.Append(multiErr, errors.Wrapf(err, "error removing credentials for %s from credential helper %s", registry, helper)) + } - // Next if keyring is enabled try kernel keyring - if enableKeyring { - err := deleteAuthFromKernelKeyring(registry) - if err == nil { - logrus.Debugf("credentials for %s were deleted from the kernel keyring", registry) - return false, nil + for _, helper := range helpers { + var err error + switch helper { + // Special-case the built-in helper for auth files. + case sysregistriesv2.AuthenticationFileHelper: + err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) { + if innerHelper, exists := auths.CredHelpers[registry]; exists { + removeFromCredHelper(innerHelper) + } + if _, ok := auths.AuthConfigs[registry]; ok { + isLoggedIn = true + delete(auths.AuthConfigs, registry) + } else if _, ok := auths.AuthConfigs[normalizeRegistry(registry)]; ok { + isLoggedIn = true + delete(auths.AuthConfigs, normalizeRegistry(registry)) + } + return true, multiErr + }) + if err != nil { + multiErr = multierror.Append(multiErr, err) } - logrus.Debugf("failed to delete credentials from the kernel keyring, falling back to authfiles") + // External helpers. + default: + removeFromCredHelper(helper) } + } - if _, ok := auths.AuthConfigs[registry]; ok { - delete(auths.AuthConfigs, registry) - } else if _, ok := auths.AuthConfigs[normalizeRegistry(registry)]; ok { - delete(auths.AuthConfigs, normalizeRegistry(registry)) - } else { - return false, ErrNotLoggedIn - } - return true, nil - }) + if multiErr != nil { + return multiErr + } + if !isLoggedIn { + return ErrNotLoggedIn + } + + return nil } -// RemoveAllAuthentication deletes all the credentials stored in auth.json and kernel keyring +// RemoveAllAuthentication deletes all the credentials stored in credential +// helpers and auth files. func RemoveAllAuthentication(sys *types.SystemContext) error { - return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) { - if enableKeyring { - err := removeAllAuthFromKernelKeyring() - if err == nil { - logrus.Debugf("removing all credentials from kernel keyring") - return false, nil + helpers, err := sysregistriesv2.CredentialHelpers(sys) + if err != nil { + return err + } + + var multiErr error + for _, helper := range helpers { + var err error + switch helper { + // Special-case the built-in helper for auth files. + case sysregistriesv2.AuthenticationFileHelper: + err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) { + for registry, helper := range auths.CredHelpers { + // Helpers in auth files are expected + // to exist, so no special treatment + // for them. 
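A minimal login/lookup/logout sketch against the rewritten API; the registry and credentials are placeholders. Each call now iterates over the configured credential helpers, with the containers-auth.json built-in helper as the default.

package example

import (
	"github.com/containers/image/v5/pkg/docker/config"
	"github.com/containers/image/v5/types"
)

func loginThenLogout(sys *types.SystemContext) error {
	// Store credentials in the first credential helper that accepts them.
	if err := config.SetAuthentication(sys, "registry.example.com", "someuser", "somepassword"); err != nil {
		return err
	}
	// Look them up again; helpers are consulted in the configured order.
	auth, err := config.GetCredentials(sys, "registry.example.com")
	if err != nil {
		return err
	}
	_ = auth // auth.Username / auth.Password / auth.IdentityToken
	// Remove the credentials from every source that holds them.
	return config.RemoveAuthentication(sys, "registry.example.com")
}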
+ if err := deleteAuthFromCredHelper(helper, registry); err != nil { + return false, err + } + } + auths.CredHelpers = make(map[string]string) + auths.AuthConfigs = make(map[string]dockerAuthConfig) + return true, nil + }) + // External helpers. + default: + var creds map[string]string + creds, err = listAuthsFromCredHelper(helper) + switch errors.Cause(err) { + case nil: + for registry := range creds { + err = deleteAuthFromCredHelper(helper, registry) + if err != nil { + break + } + } + case exec.ErrNotFound: + // It's okay if the helper doesn't exist. + continue + default: + // fall through } - logrus.Debugf("error removing credentials from kernel keyring") } - auths.CredHelpers = make(map[string]string) - auths.AuthConfigs = make(map[string]dockerAuthConfig) - return true, nil - }) + if err != nil { + logrus.Debugf("Error removing credentials from credential helper %s: %v", helper, err) + multiErr = multierror.Append(multiErr, err) + continue + } + logrus.Debugf("All credentials removed from credential helper %s", helper) + } + + return multiErr +} + +func listAuthsFromCredHelper(credHelper string) (map[string]string, error) { + helperName := fmt.Sprintf("docker-credential-%s", credHelper) + p := helperclient.NewShellProgramFunc(helperName) + return helperclient.List(p) } -// getPathToAuth gets the path of the auth.json file used for reading and writing credentials +// getPathToAuth gets the path of the auth.json file used for reading and writting credentials // returns the path, and a bool specifies whether the file is in legacy format func getPathToAuth(sys *types.SystemContext) (string, bool, error) { return getPathToAuthWithOS(sys, runtime.GOOS) @@ -387,14 +531,17 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) ( return nil } -func getAuthFromCredHelper(credHelper, registry string) (string, string, error) { +func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) { helperName := fmt.Sprintf("docker-credential-%s", credHelper) p := helperclient.NewShellProgramFunc(helperName) creds, err := helperclient.Get(p, registry) if err != nil { - return "", "", err + return types.DockerAuthConfig{}, err } - return creds.Username, creds.Secret, nil + return types.DockerAuthConfig{ + Username: creds.Username, + Password: creds.Secret, + }, nil } func setAuthToCredHelper(credHelper, registry, username, password string) error { @@ -423,15 +570,7 @@ func findAuthentication(registry, path string, legacyFormat bool) (types.DockerA // First try cred helpers. They should always be normalized. if ch, exists := auths.CredHelpers[registry]; exists { - username, password, err := getAuthFromCredHelper(ch, registry) - if err != nil { - return types.DockerAuthConfig{}, err - } - - return types.DockerAuthConfig{ - Username: username, - Password: password, - }, nil + return getAuthFromCredHelper(ch, registry) } // I'm feeling lucky diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go index 1531d694315..5bbfb450f82 100644 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go @@ -9,9 +9,13 @@ import ( "github.com/sirupsen/logrus" ) -const keyDescribePrefix = "container-registry-login:" +// NOTE: none of the functions here are currently used. 
If we ever want to +// reenable keyring support, we should introduce a similar built-in credential +// helpers as for `sysregistriesv2.AuthenticationFileHelper`. -func getAuthFromKernelKeyring(registry string) (string, string, error) { +const keyDescribePrefix = "container-registry-login:" // nolint + +func getAuthFromKernelKeyring(registry string) (string, string, error) { // nolint userkeyring, err := keyctl.UserKeyring() if err != nil { return "", "", err @@ -31,7 +35,7 @@ func getAuthFromKernelKeyring(registry string) (string, string, error) { return parts[0], parts[1], nil } -func deleteAuthFromKernelKeyring(registry string) error { +func deleteAuthFromKernelKeyring(registry string) error { // nolint userkeyring, err := keyctl.UserKeyring() if err != nil { @@ -44,7 +48,7 @@ func deleteAuthFromKernelKeyring(registry string) error { return key.Unlink() } -func removeAllAuthFromKernelKeyring() error { +func removeAllAuthFromKernelKeyring() error { // nolint keys, err := keyctl.ReadUserKeyring() if err != nil { return err @@ -77,7 +81,7 @@ func removeAllAuthFromKernelKeyring() error { return nil } -func setAuthToKernelKeyring(registry, username, password string) error { +func setAuthToKernelKeyring(registry, username, password string) error { // nolint keyring, err := keyctl.SessionKeyring() if err != nil { return err @@ -110,6 +114,6 @@ func setAuthToKernelKeyring(registry, username, password string) error { return nil } -func genDescription(registry string) string { +func genDescription(registry string) string { // nolint return fmt.Sprintf("%s%s", keyDescribePrefix, registry) } diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go index a9c498d7a5d..784a616dc91 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go @@ -34,15 +34,9 @@ func shortNameAliasesConfPath(ctx *types.SystemContext) (string, error) { } // Rootless user - var cacheRoot string - if xdgCache := os.Getenv("XDG_CACHE_HOME"); xdgCache != "" { - cacheRoot = xdgCache - } else { - configHome, err := homedir.GetConfigHome() - if err != nil { - return "", err - } - cacheRoot = filepath.Join(configHome, ".cache") + cacheRoot, err := homedir.GetCacheHome() + if err != nil { + return "", err } return filepath.Join(cacheRoot, userShortNamesFile), nil diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go index 3312237efac..880f8c8710e 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go @@ -30,16 +30,24 @@ const builtinRegistriesConfPath = "/etc/containers/registries.conf" // systemRegistriesConfDirPath is the path to the system-wide registry // configuration directory and is used to add/subtract potential registries for // obtaining images. You can override this at build time with -// -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfDirecotyPath=$your_path' +// -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfDirectoryPath=$your_path' var systemRegistriesConfDirPath = builtinRegistriesConfDirPath // builtinRegistriesConfDirPath is the path to the registry configuration directory. 
// DO NOT change this, instead see systemRegistriesConfDirectoryPath above. const builtinRegistriesConfDirPath = "/etc/containers/registries.conf.d" +// AuthenticationFileHelper is a special key for credential helpers indicating +// the usage of consulting containers-auth.json files instead of a credential +// helper. +const AuthenticationFileHelper = "containers-auth.json" + // Endpoint describes a remote location of a registry. type Endpoint struct { - // The endpoint's remote location. + // The endpoint's remote location. Can be empty iff Prefix contains + // wildcard in the format: "*.example.com" for subdomain matching. + // Please refer to FindRegistry / PullSourcesFromReference instead + // of accessing/interpreting `Location` directly. Location string `toml:"location,omitempty"` // If true, certs verification will be skipped and HTTP (non-TLS) // connections will be allowed. @@ -57,11 +65,26 @@ var userRegistriesDir = filepath.FromSlash(".config/containers/registries.conf.d // The function errors if the newly created reference is not parsable. func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (reference.Named, error) { refString := ref.String() - if !refMatchesPrefix(refString, prefix) { + var newNamedRef string + // refMatchingPrefix returns the length of the match. Everything that + // follows the match gets appended to registries location. + prefixLen := refMatchingPrefix(refString, prefix) + if prefixLen == -1 { return nil, fmt.Errorf("invalid prefix '%v' for reference '%v'", prefix, refString) } - - newNamedRef := strings.Replace(refString, prefix, e.Location, 1) + // In the case of an empty `location` field, simply return the original + // input ref as-is. + // + // FIXME: already validated in postProcessRegistries, so check can probably + // be dropped. + // https://github.com/containers/image/pull/1191#discussion_r610621608 + if e.Location == "" { + if prefix[:2] != "*." { + return nil, fmt.Errorf("invalid prefix '%v' for empty location, should be in the format: *.example.com", prefix) + } + return ref, nil + } + newNamedRef = e.Location + refString[prefixLen:] newParsedRef, err := reference.ParseNamed(newNamedRef) if err != nil { return nil, errors.Wrapf(err, "error rewriting reference") @@ -77,6 +100,11 @@ type Registry struct { // and we pull from "example.com/bar/myimage:latest", the image will // effectively be pulled from "example.com/foo/bar/myimage:latest". // If no Prefix is specified, it defaults to the specified location. + // Prefix can also be in the format: "*.example.com" for matching + // subdomains. The wildcard should only be in the beginning and should also + // not contain any namespaces or special characters: "/", "@" or ":". + // Please refer to FindRegistry / PullSourcesFromReference instead + // of accessing/interpreting `Prefix` directly. Prefix string `toml:"prefix"` // A registry is an Endpoint too Endpoint @@ -154,6 +182,14 @@ type V2RegistriesConf struct { Registries []Registry `toml:"registry"` // An array of host[:port] (not prefix!) entries to use for resolving unqualified image references UnqualifiedSearchRegistries []string `toml:"unqualified-search-registries"` + // An array of global credential helpers to use for authentication + // (e.g., ["pass", "secretservice"]). The helpers are consulted in the + // specified order. Note that "containers-auth.json" is a reserved + // value for consulting auth files as specified in + // containers-auth.json(5). 
+ // + // If empty, CredentialHelpers defaults to ["containers-auth.json"]. + CredentialHelpers []string `toml:"credential-helpers"` // ShortNameMode defines how short-name resolution should be handled by // _consumers_ of this package. Depending on the mode, the user should @@ -177,7 +213,7 @@ func (config *V2RegistriesConf) Nonempty() bool { // parsedConfig is the result of parsing, and possibly merging, configuration files; // it is the boundary between the process of reading+ingesting the files, and -// later interpreting the configuraiton based on caller’s requests. +// later interpreting the configuration based on caller’s requests. type parsedConfig struct { // NOTE: Update also parsedConfig.updateWithConfigurationFrom! @@ -212,9 +248,15 @@ func (e *InvalidRegistries) Error() string { func parseLocation(input string) (string, error) { trimmed := strings.TrimRight(input, "/") - if trimmed == "" { - return "", &InvalidRegistries{s: "invalid location: cannot be empty"} - } + // FIXME: This check needs to exist but fails for empty Location field with + // wildcarded prefix. Removal of this check "only" allows invalid input in, + // and does not prevent correct operation. + // https://github.com/containers/image/pull/1191#discussion_r610122617 + // + // if trimmed == "" { + // return "", &InvalidRegistries{s: "invalid location: cannot be empty"} + // } + // if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") { msg := fmt.Sprintf("invalid location '%s': URI schemes are not supported", input) @@ -293,12 +335,20 @@ func (config *V2RegistriesConf) postProcessRegistries() error { } if reg.Prefix == "" { + if reg.Location == "" { + return &InvalidRegistries{s: "invalid condition: both location and prefix are unset"} + } reg.Prefix = reg.Location } else { reg.Prefix, err = parseLocation(reg.Prefix) if err != nil { return err } + // FIXME: allow config authors to always use Prefix. + // https://github.com/containers/image/pull/1191#discussion_r610622495 + if reg.Prefix[:2] != "*." && reg.Location == "" { + return &InvalidRegistries{s: "invalid condition: location is unset and prefix is not in the format: *.example.com"} + } } // make sure mirrors are valid @@ -307,8 +357,19 @@ func (config *V2RegistriesConf) postProcessRegistries() error { if err != nil { return err } + + //FIXME: unqualifiedSearchRegistries now also accepts empty values + //and shouldn't + // https://github.com/containers/image/pull/1191#discussion_r610623216 + if mir.Location == "" { + return &InvalidRegistries{s: "invalid condition: mirror location is unset"} + } + } + if reg.Location == "" { + regMap[reg.Prefix] = append(regMap[reg.Prefix], reg) + } else { + regMap[reg.Location] = append(regMap[reg.Location], reg) } - regMap[reg.Location] = append(regMap[reg.Location], reg) } // Given a registry can be mentioned multiple times (e.g., to have @@ -318,7 +379,13 @@ func (config *V2RegistriesConf) postProcessRegistries() error { // Note: we need to iterate over the registries array to ensure a // deterministic behavior which is not guaranteed by maps. 
for _, reg := range config.Registries { - others, ok := regMap[reg.Location] + var others []*Registry + var ok bool + if reg.Location == "" { + others, ok = regMap[reg.Prefix] + } else { + others, ok = regMap[reg.Location] + } if !ok { return fmt.Errorf("Internal error in V2RegistriesConf.PostProcess: entry in regMap is missing") } @@ -450,7 +517,7 @@ func newConfigWrapperWithHomeDir(ctx *types.SystemContext, homeDir string) confi return wrapper } -// ConfigurationSourceDescription returns a string containres paths of registries.conf and registries.conf.d +// ConfigurationSourceDescription returns a string containers paths of registries.conf and registries.conf.d func ConfigurationSourceDescription(ctx *types.SystemContext) string { wrapper := newConfigWrapper(ctx) configSources := []string{wrapper.configPath} @@ -601,11 +668,17 @@ func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*parsedC config.shortNameMode = defaultShortNameMode } + if len(config.partialV2.CredentialHelpers) == 0 { + config.partialV2.CredentialHelpers = []string{AuthenticationFileHelper} + } + // populate the cache configCache[wrapper] = config return config, nil } +// GetRegistries has been deprecated. Use FindRegistry instead. +// // GetRegistries loads and returns the registries specified in the config. // Note the parsed content of registry config files is cached. For reloading, // use `InvalidateCache` and re-call `GetRegistries`. @@ -663,27 +736,72 @@ func GetShortNameMode(ctx *types.SystemContext) (types.ShortNameMode, error) { return config.shortNameMode, err } -// refMatchesPrefix returns true iff ref, +// CredentialHelpers returns the global top-level credential helpers. +func CredentialHelpers(sys *types.SystemContext) ([]string, error) { + config, err := getConfig(sys) + if err != nil { + return nil, err + } + return config.partialV2.CredentialHelpers, nil +} + +// refMatchingSubdomainPrefix returns the length of ref +// iff ref, which is a registry, repository namespace, repository or image reference (as formatted by +// reference.Domain(), reference.Named.Name() or reference.Reference.String() +// — note that this requires the name to start with an explicit hostname!), +// matches a Registry.Prefix value containing wildcarded subdomains in the +// format: *.example.com. Wildcards are only accepted at the beginning, so +// other formats like example.*.com will not work. Wildcarded prefixes also +// cannot contain port numbers or namespaces in them. +func refMatchingSubdomainPrefix(ref, prefix string) int { + index := strings.Index(ref, prefix[1:]) + if index == -1 { + return -1 + } + if strings.Contains(ref[:index], "/") { + return -1 + } + index += len(prefix[1:]) + if index == len(ref) { + return index + } + switch ref[index] { + case ':', '/', '@': + return index + default: + return -1 + } +} + +// refMatchingPrefix returns the length of the prefix iff ref, // which is a registry, repository namespace, repository or image reference (as formatted by // reference.Domain(), reference.Named.Name() or reference.Reference.String() // — note that this requires the name to start with an explicit hostname!), // matches a Registry.Prefix value. // (This is split from the caller primarily to make testing easier.) 
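CredentialHelpers above is the new public accessor for the top-level credential-helpers list, and tryUpdatingCache defaults it to the reserved containers-auth.json entry when the key is absent. A hedged sketch of how a caller might consult it against the system-wide configuration:

package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/sysregistriesv2"
	"github.com/containers/image/v5/types"
)

func main() {
	// An empty SystemContext means the system-wide registries.conf (plus
	// registries.conf.d) is consulted; with no credential-helpers key the
	// result is []string{sysregistriesv2.AuthenticationFileHelper}.
	helpers, err := sysregistriesv2.CredentialHelpers(&types.SystemContext{})
	if err != nil {
		panic(err)
	}
	for _, h := range helpers {
		if h == sysregistriesv2.AuthenticationFileHelper {
			fmt.Println("consult containers-auth.json files")
			continue
		}
		fmt.Println("invoke docker-credential-" + h)
	}
}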
-func refMatchesPrefix(ref, prefix string) bool { +func refMatchingPrefix(ref, prefix string) int { switch { + case prefix[0:2] == "*.": + return refMatchingSubdomainPrefix(ref, prefix) case len(ref) < len(prefix): - return false + return -1 case len(ref) == len(prefix): - return ref == prefix + if ref == prefix { + return len(prefix) + } + return -1 case len(ref) > len(prefix): if !strings.HasPrefix(ref, prefix) { - return false + return -1 } c := ref[len(prefix)] // This allows "example.com:5000" to match "example.com", // which is unintended; that will get fixed eventually, DON'T RELY // ON THE CURRENT BEHAVIOR. - return c == ':' || c == '/' || c == '@' + if c == ':' || c == '/' || c == '@' { + return len(prefix) + } + return -1 default: panic("Internal error: impossible comparison outcome") } @@ -700,10 +818,16 @@ func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) { return nil, err } + return findRegistryWithParsedConfig(config, ref) +} + +// findRegistryWithParsedConfig implements `FindRegistry` with a pre-loaded +// parseConfig. +func findRegistryWithParsedConfig(config *parsedConfig, ref string) (*Registry, error) { reg := Registry{} prefixLen := 0 for _, r := range config.partialV2.Registries { - if refMatchesPrefix(ref, r.Prefix) { + if refMatchingPrefix(ref, r.Prefix) != -1 { length := len(r.Prefix) if length > prefixLen { reg = r @@ -772,6 +896,17 @@ func loadConfigFile(path string, forceV2 bool) (*parsedConfig, error) { res.shortNameMode = types.ShortNameModeInvalid } + // Valid wildcarded prefixes must be in the format: *.example.com + // FIXME: Move to postProcessRegistries + // https://github.com/containers/image/pull/1191#discussion_r610623829 + for i := range res.partialV2.Registries { + prefix := res.partialV2.Registries[i].Prefix + if prefix[:2] == "*." && strings.ContainsAny(prefix, "/@:") { + msg := fmt.Sprintf("Wildcarded prefix should be in the format: *.example.com. Current prefix %q is incorrectly formatted", prefix) + return nil, &InvalidRegistries{s: msg} + } + } + // Parse and validate short-name aliases. cache, err := newShortNameAliasCache(path, &res.partialV2.shortNameAliasConf) if err != nil { @@ -825,6 +960,11 @@ func (c *parsedConfig) updateWithConfigurationFrom(updates *parsedConfig) { c.unqualifiedSearchRegistriesOrigin = updates.unqualifiedSearchRegistriesOrigin } + // == Merge credential helpers: + if updates.partialV2.CredentialHelpers != nil { + c.partialV2.CredentialHelpers = updates.partialV2.CredentialHelpers + } + // == Merge shortNameMode: // We don’t maintain c.partialV2.ShortNameMode. if updates.shortNameMode != types.ShortNameModeInvalid { diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go index 8655ca44374..3e7abd34dc7 100644 --- a/vendor/github.com/containers/image/v5/types/types.go +++ b/vendor/github.com/containers/image/v5/types/types.go @@ -147,7 +147,7 @@ type BlobInfo struct { } // BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present. -// BlobInfocache.RecordKnownLocations / BlobInfocache.CandidateLocations record data aboud blobs keyed by (scope, digest). +// BlobInfocache.RecordKnownLocations / BlobInfocache.CandidateLocations record data about blobs keyed by (scope, digest). // The scope will typically be similar to an ImageReference, or a superset of it within which blobs are reusable. 
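refMatchingPrefix and refMatchingSubdomainPrefix above give prefixes of the form *.example.com a subdomain-matching meaning, while FindRegistry still picks the longest matching prefix. A sketch of the observable behaviour through the public API; the registries.conf contents, hostnames and temp path are illustrative:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/containers/image/v5/pkg/sysregistriesv2"
	"github.com/containers/image/v5/types"
)

func main() {
	// A wildcard prefix may leave location unset; matching references are
	// then used as-is (see rewriteReference above).
	conf := []byte(`
[[registry]]
prefix = "*.example.com"

[[registry]]
prefix = "registry.example.com/ns"
location = "mirror.internal/ns"
`)
	dir, err := ioutil.TempDir("", "registries")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	confPath := filepath.Join(dir, "registries.conf")
	if err := ioutil.WriteFile(confPath, conf, 0644); err != nil {
		panic(err)
	}
	sys := &types.SystemContext{
		SystemRegistriesConfPath:    confPath,
		SystemRegistriesConfDirPath: dir,
	}

	for _, ref := range []string{
		"registry.example.com/ns/app:latest", // longest match: the namespaced entry
		"build.example.com/tools/app:latest", // only the wildcard entry matches
		"quay.io/other/app:latest",           // no entry matches
	} {
		reg, err := sysregistriesv2.FindRegistry(sys, ref)
		if err != nil {
			panic(err)
		}
		if reg == nil {
			fmt.Printf("%s -> no registry entry\n", ref)
			continue
		}
		fmt.Printf("%s -> prefix %q\n", ref, reg.Prefix)
	}
}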
// // NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different @@ -179,7 +179,7 @@ type BICReplacementCandidate struct { // It records two kinds of data: // - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs: // One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest. -// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompresssion), +// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression), // or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload)/ // // It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known @@ -219,7 +219,7 @@ type BlobInfoCache interface { // CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused // within the specified (transport scope) (if they still exist, which is not guaranteed). // - // If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, + // If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same // uncompressed digest. CandidateLocations(transport ImageTransport, scope BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate @@ -582,7 +582,7 @@ type SystemContext struct { // === OCI.Transport overrides === // If not "", a directory containing a CA certificate (ending with ".crt"), - // a client certificate (ending with ".cert") and a client ceritificate key + // a client certificate (ending with ".cert") and a client certificate key // (ending with ".key") used when downloading OCI image layers. OCICertPath string // Allow downloading OCI image layers over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections. @@ -594,7 +594,7 @@ type SystemContext struct { // === docker.Transport overrides === // If not "", a directory containing a CA certificate (ending with ".crt"), - // a client certificate (ending with ".cert") and a client ceritificate key + // a client certificate (ending with ".cert") and a client certificate key // (ending with ".key") used when talking to a Docker Registry. DockerCertPath string // If not "", overrides the system’s default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above. diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index 4c722505c7d..3e9f09aab65 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -6,9 +6,9 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 10 + VersionMinor = 11 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 2 + VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. 
VersionDev = "" diff --git a/vendor/github.com/containers/ocicrypt/.travis.yml b/vendor/github.com/containers/ocicrypt/.travis.yml index 7031d938a40..e4dd4a4021c 100644 --- a/vendor/github.com/containers/ocicrypt/.travis.yml +++ b/vendor/github.com/containers/ocicrypt/.travis.yml @@ -6,6 +6,7 @@ os: go: - "1.13.x" + - "1.16.x" matrix: include: diff --git a/vendor/github.com/containers/ocicrypt/go.mod b/vendor/github.com/containers/ocicrypt/go.mod index 06a77af95f3..02be185915f 100644 --- a/vendor/github.com/containers/ocicrypt/go.mod +++ b/vendor/github.com/containers/ocicrypt/go.mod @@ -13,9 +13,9 @@ require ( github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 github.com/stretchr/testify v1.3.0 go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 - golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de - golang.org/x/sys v0.0.0-20200817155316-9781c653f443 // indirect + golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 + golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 google.golang.org/grpc v1.33.2 gopkg.in/square/go-jose.v2 v2.5.1 - gopkg.in/yaml.v2 v2.3.0 + gopkg.in/yaml.v2 v2.4.0 ) diff --git a/vendor/github.com/containers/ocicrypt/go.sum b/vendor/github.com/containers/ocicrypt/go.sum index b014b76ee8c..7153900dafc 100644 --- a/vendor/github.com/containers/ocicrypt/go.sum +++ b/vendor/github.com/containers/ocicrypt/go.sum @@ -3,7 +3,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -53,8 +52,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de h1:ikNHVSjEfnvz6sxdSPCaPt572qowuyMDMJLLm3Db3ig= -golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -63,21 +62,23 @@ golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200817155316-9781c653f443 h1:X18bCaipMcoJGm27Nv7zr4XYPKGUy92GtqboKC2Hxaw= -golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -110,7 +111,7 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 
h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/containers/ocicrypt/gpg.go b/vendor/github.com/containers/ocicrypt/gpg.go index c4d31e52d67..b9d55539a11 100644 --- a/vendor/github.com/containers/ocicrypt/gpg.go +++ b/vendor/github.com/containers/ocicrypt/gpg.go @@ -27,7 +27,7 @@ import ( ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "golang.org/x/crypto/ssh/terminal" + "golang.org/x/term" ) // GPGVersion enum representing the GPG client version to use. @@ -387,7 +387,7 @@ func GPGGetPrivateKey(descs []ocispec.Descriptor, gpgClient GPGClient, gpgVault fmt.Printf("Passphrase required for Key id 0x%x: \n%v", keyid, string(keyinfo)) fmt.Printf("Enter passphrase for key with Id 0x%x: ", keyid) - password, err := terminal.ReadPassword(int(os.Stdin.Fd())) + password, err := term.ReadPassword(int(os.Stdin.Fd())) fmt.Printf("\n") if err != nil { return nil, nil, err diff --git a/vendor/github.com/containers/ocicrypt/utils/testing.go b/vendor/github.com/containers/ocicrypt/utils/testing.go index e2ed4b1d849..38633b19b8b 100644 --- a/vendor/github.com/containers/ocicrypt/utils/testing.go +++ b/vendor/github.com/containers/ocicrypt/utils/testing.go @@ -67,7 +67,7 @@ func CreateRSATestKey(bits int, password []byte, pemencode bool) ([]byte, []byte typ := "RSA PRIVATE KEY" if len(password) > 0 { - block, err = x509.EncryptPEMBlock(rand.Reader, typ, privData, password, x509.PEMCipherAES256) + block, err = x509.EncryptPEMBlock(rand.Reader, typ, privData, password, x509.PEMCipherAES256) //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility if err != nil { return nil, nil, errors.Wrap(err, "x509.EncryptPEMBlock failed") } diff --git a/vendor/github.com/containers/ocicrypt/utils/utils.go b/vendor/github.com/containers/ocicrypt/utils/utils.go index 7bc2aa28d31..07fe6d36708 100644 --- a/vendor/github.com/containers/ocicrypt/utils/utils.go +++ b/vendor/github.com/containers/ocicrypt/utils/utils.go @@ -95,11 +95,11 @@ func ParsePrivateKey(privKey, privKeyPassword []byte, prefix string) (interface{ block, _ := pem.Decode(privKey) if block != nil { var der []byte - if x509.IsEncryptedPEMBlock(block) { + if x509.IsEncryptedPEMBlock(block) { //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility if privKeyPassword == nil { return nil, errors.Errorf("%s: Missing password for encrypted private key", prefix) } - der, err = x509.DecryptPEMBlock(block, privKeyPassword) + der, err = x509.DecryptPEMBlock(block, privKeyPassword) //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility if err != nil { return nil, errors.Errorf("%s: Wrong password: could not decrypt private key", prefix) } diff --git a/vendor/github.com/containers/storage/AUTHORS b/vendor/github.com/containers/storage/AUTHORS index 11cd83d14eb..129dd39692a 100644 --- a/vendor/github.com/containers/storage/AUTHORS +++ b/vendor/github.com/containers/storage/AUTHORS @@ -703,6 +703,7 @@ Joost Cassee Jordan Jordan Arentsen Jordan Sissel +Jordan Williams Jose Diaz-Gonzalez Joseph Anthony Pasquale Holsten Joseph Hager diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go 
b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go index 607c5feb566..06b53854b93 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go @@ -28,3 +28,8 @@ func GetDataHome() (string, error) { func GetConfigHome() (string, error) { return "", errors.New("homedir.GetConfigHome() is not supported on this system") } + +// GetCacheHome is unsupported on non-linux system. +func GetCacheHome() (string, error) { + return "", errors.New("homedir.GetCacheHome() is not supported on this system") +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go index 0274d037f83..2475e351bb5 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go @@ -123,3 +123,18 @@ func GetConfigHome() (string, error) { } return filepath.Join(home, ".config"), nil } + +// GetCacheHome returns XDG_CACHE_HOME. +// GetCacheHome returns $HOME/.cache and nil error if XDG_CACHE_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetCacheHome() (string, error) { + if xdgCacheHome := os.Getenv("XDG_CACHE_HOME"); xdgCacheHome != "" { + return xdgCacheHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_CACHE_HOME or HOME") + } + return filepath.Join(home, ".cache"), nil +} diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go index 35104b2fd1e..0a92da2c0f0 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go @@ -191,7 +191,7 @@ func (l *lockfile) Touch() error { if !l.locked || (l.locktype != unix.F_WRLCK) { panic("attempted to update last-writer in lockfile without the write lock") } - l.stateMutex.Unlock() + defer l.stateMutex.Unlock() l.lw = stringid.GenerateRandomID() id := []byte(l.lw) _, err := unix.Seek(int(l.fd), 0, os.SEEK_SET) @@ -211,12 +211,12 @@ func (l *lockfile) Touch() error { // Modified indicates if the lockfile has been updated since the last time it // was loaded. func (l *lockfile) Modified() (bool, error) { - id := []byte(l.lw) l.stateMutex.Lock() + id := []byte(l.lw) if !l.locked { panic("attempted to check last-writer in lockfile without locking it first") } - l.stateMutex.Unlock() + defer l.stateMutex.Unlock() _, err := unix.Seek(int(l.fd), 0, os.SEEK_SET) if err != nil { return true, err diff --git a/vendor/github.com/containers/storage/pkg/mount/mount.go b/vendor/github.com/containers/storage/pkg/mount/mount.go index cd4bacd663a..23c5c44ac0d 100644 --- a/vendor/github.com/containers/storage/pkg/mount/mount.go +++ b/vendor/github.com/containers/storage/pkg/mount/mount.go @@ -41,6 +41,11 @@ func (e *mountError) Cause() error { return e.err } +// Unwrap returns the underlying cause of the error +func (e *mountError) Unwrap() error { + return e.err +} + // Mount will mount filesystem according to the specified configuration, on the // condition that the target path is *not* already mounted. Options must be // specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". 
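The mount.go hunk adds Unwrap next to the existing Cause, so mount errors now cooperate with the standard library's errors.Is and errors.As as well as with pkg/errors. The mountError type itself is unexported, so the pattern is shown here with a stand-in wrapper type:

package main

import (
	"errors"
	"fmt"
	"os"
)

// wrapped mimics a type that, like mountError, keeps the underlying cause.
type wrapped struct {
	op  string
	err error
}

func (e *wrapped) Error() string { return e.op + ": " + e.err.Error() }

// Cause supports github.com/pkg/errors-style unwrapping.
func (e *wrapped) Cause() error { return e.err }

// Unwrap supports the standard library's errors.Is / errors.As.
func (e *wrapped) Unwrap() error { return e.err }

func main() {
	err := &wrapped{op: "mount /dev/foo", err: os.ErrPermission}

	// Without Unwrap, errors.Is could not see through the wrapper.
	fmt.Println(errors.Is(err, os.ErrPermission)) // true
}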
See diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal.go index a0f4837a02c..ac24c7767d3 100644 --- a/vendor/github.com/coreos/go-systemd/v22/journal/journal.go +++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal.go @@ -23,20 +23,7 @@ package journal import ( - "bytes" - "encoding/binary" - "errors" "fmt" - "io" - "io/ioutil" - "net" - "os" - "strconv" - "strings" - "sync" - "sync/atomic" - "syscall" - "unsafe" ) // Priority of a journal message @@ -53,173 +40,7 @@ const ( PriDebug ) -var ( - // This can be overridden at build-time: - // https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable - journalSocket = "/run/systemd/journal/socket" - - // unixConnPtr atomically holds the local unconnected Unix-domain socket. - // Concrete safe pointer type: *net.UnixConn - unixConnPtr unsafe.Pointer - // onceConn ensures that unixConnPtr is initialized exactly once. - onceConn sync.Once -) - -func init() { - onceConn.Do(initConn) -} - -// Enabled checks whether the local systemd journal is available for logging. -func Enabled() bool { - onceConn.Do(initConn) - - if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil { - return false - } - - if _, err := net.Dial("unixgram", journalSocket); err != nil { - return false - } - - return true -} - -// Send a message to the local systemd journal. vars is a map of journald -// fields to values. Fields must be composed of uppercase letters, numbers, -// and underscores, but must not start with an underscore. Within these -// restrictions, any arbitrary field name may be used. Some names have special -// significance: see the journalctl documentation -// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) -// for more details. vars may be nil. -func Send(message string, priority Priority, vars map[string]string) error { - conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) - if conn == nil { - return errors.New("could not initialize socket to journald") - } - - socketAddr := &net.UnixAddr{ - Name: journalSocket, - Net: "unixgram", - } - - data := new(bytes.Buffer) - appendVariable(data, "PRIORITY", strconv.Itoa(int(priority))) - appendVariable(data, "MESSAGE", message) - for k, v := range vars { - appendVariable(data, k, v) - } - - _, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr) - if err == nil { - return nil - } - if !isSocketSpaceError(err) { - return err - } - - // Large log entry, send it via tempfile and ancillary-fd. - file, err := tempFd() - if err != nil { - return err - } - defer file.Close() - _, err = io.Copy(file, data) - if err != nil { - return err - } - rights := syscall.UnixRights(int(file.Fd())) - _, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr) - if err != nil { - return err - } - - return nil -} - // Print prints a message to the local systemd journal using Send(). 
func Print(priority Priority, format string, a ...interface{}) error { return Send(fmt.Sprintf(format, a...), priority, nil) } - -func appendVariable(w io.Writer, name, value string) { - if err := validVarName(name); err != nil { - fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name) - } - if strings.ContainsRune(value, '\n') { - /* When the value contains a newline, we write: - * - the variable name, followed by a newline - * - the size (in 64bit little endian format) - * - the data, followed by a newline - */ - fmt.Fprintln(w, name) - binary.Write(w, binary.LittleEndian, uint64(len(value))) - fmt.Fprintln(w, value) - } else { - /* just write the variable and value all on one line */ - fmt.Fprintf(w, "%s=%s\n", name, value) - } -} - -// validVarName validates a variable name to make sure journald will accept it. -// The variable name must be in uppercase and consist only of characters, -// numbers and underscores, and may not begin with an underscore: -// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html -func validVarName(name string) error { - if name == "" { - return errors.New("Empty variable name") - } else if name[0] == '_' { - return errors.New("Variable name begins with an underscore") - } - - for _, c := range name { - if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') { - return errors.New("Variable name contains invalid characters") - } - } - return nil -} - -// isSocketSpaceError checks whether the error is signaling -// an "overlarge message" condition. -func isSocketSpaceError(err error) bool { - opErr, ok := err.(*net.OpError) - if !ok || opErr == nil { - return false - } - - sysErr, ok := opErr.Err.(*os.SyscallError) - if !ok || sysErr == nil { - return false - } - - return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS -} - -// tempFd creates a temporary, unlinked file under `/dev/shm`. -func tempFd() (*os.File, error) { - file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX") - if err != nil { - return nil, err - } - err = syscall.Unlink(file.Name()) - if err != nil { - return nil, err - } - return file, nil -} - -// initConn initializes the global `unixConnPtr` socket. -// It is meant to be called exactly once, at program startup. -func initConn() { - autobind, err := net.ResolveUnixAddr("unixgram", "") - if err != nil { - return - } - - sock, err := net.ListenUnixgram("unixgram", autobind) - if err != nil { - return - } - - atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock)) -} diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go new file mode 100644 index 00000000000..7233ecfc7b1 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go @@ -0,0 +1,208 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +// Package journal provides write bindings to the local systemd journal. 
+// It is implemented in pure Go and connects to the journal directly over its +// unix socket. +// +// To read from the journal, see the "sdjournal" package, which wraps the +// sd-journal a C API. +// +// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html +package journal + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "strconv" + "strings" + "sync" + "sync/atomic" + "syscall" + "unsafe" +) + +var ( + // This can be overridden at build-time: + // https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable + journalSocket = "/run/systemd/journal/socket" + + // unixConnPtr atomically holds the local unconnected Unix-domain socket. + // Concrete safe pointer type: *net.UnixConn + unixConnPtr unsafe.Pointer + // onceConn ensures that unixConnPtr is initialized exactly once. + onceConn sync.Once +) + +func init() { + onceConn.Do(initConn) +} + +// Enabled checks whether the local systemd journal is available for logging. +func Enabled() bool { + onceConn.Do(initConn) + + if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil { + return false + } + + if _, err := net.Dial("unixgram", journalSocket); err != nil { + return false + } + + return true +} + +// Send a message to the local systemd journal. vars is a map of journald +// fields to values. Fields must be composed of uppercase letters, numbers, +// and underscores, but must not start with an underscore. Within these +// restrictions, any arbitrary field name may be used. Some names have special +// significance: see the journalctl documentation +// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) +// for more details. vars may be nil. +func Send(message string, priority Priority, vars map[string]string) error { + conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) + if conn == nil { + return errors.New("could not initialize socket to journald") + } + + socketAddr := &net.UnixAddr{ + Name: journalSocket, + Net: "unixgram", + } + + data := new(bytes.Buffer) + appendVariable(data, "PRIORITY", strconv.Itoa(int(priority))) + appendVariable(data, "MESSAGE", message) + for k, v := range vars { + appendVariable(data, k, v) + } + + _, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr) + if err == nil { + return nil + } + if !isSocketSpaceError(err) { + return err + } + + // Large log entry, send it via tempfile and ancillary-fd. + file, err := tempFd() + if err != nil { + return err + } + defer file.Close() + _, err = io.Copy(file, data) + if err != nil { + return err + } + rights := syscall.UnixRights(int(file.Fd())) + _, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr) + if err != nil { + return err + } + + return nil +} + +func appendVariable(w io.Writer, name, value string) { + if err := validVarName(name); err != nil { + fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name) + } + if strings.ContainsRune(value, '\n') { + /* When the value contains a newline, we write: + * - the variable name, followed by a newline + * - the size (in 64bit little endian format) + * - the data, followed by a newline + */ + fmt.Fprintln(w, name) + binary.Write(w, binary.LittleEndian, uint64(len(value))) + fmt.Fprintln(w, value) + } else { + /* just write the variable and value all on one line */ + fmt.Fprintf(w, "%s=%s\n", name, value) + } +} + +// validVarName validates a variable name to make sure journald will accept it. 
+// The variable name must be in uppercase and consist only of characters, +// numbers and underscores, and may not begin with an underscore: +// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html +func validVarName(name string) error { + if name == "" { + return errors.New("Empty variable name") + } else if name[0] == '_' { + return errors.New("Variable name begins with an underscore") + } + + for _, c := range name { + if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') { + return errors.New("Variable name contains invalid characters") + } + } + return nil +} + +// isSocketSpaceError checks whether the error is signaling +// an "overlarge message" condition. +func isSocketSpaceError(err error) bool { + opErr, ok := err.(*net.OpError) + if !ok || opErr == nil { + return false + } + + sysErr, ok := opErr.Err.(*os.SyscallError) + if !ok || sysErr == nil { + return false + } + + return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS +} + +// tempFd creates a temporary, unlinked file under `/dev/shm`. +func tempFd() (*os.File, error) { + file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX") + if err != nil { + return nil, err + } + err = syscall.Unlink(file.Name()) + if err != nil { + return nil, err + } + return file, nil +} + +// initConn initializes the global `unixConnPtr` socket. +// It is meant to be called exactly once, at program startup. +func initConn() { + autobind, err := net.ResolveUnixAddr("unixgram", "") + if err != nil { + return + } + + sock, err := net.ListenUnixgram("unixgram", autobind) + if err != nil { + return + } + + atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock)) +} diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go new file mode 100644 index 00000000000..677aca68ed2 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go @@ -0,0 +1,35 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package journal provides write bindings to the local systemd journal. +// It is implemented in pure Go and connects to the journal directly over its +// unix socket. +// +// To read from the journal, see the "sdjournal" package, which wraps the +// sd-journal a C API. 
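journal_unix.go above carries the existing socket implementation over unchanged, and journal_windows.go (below) stubs Enabled and Send out, so callers use the same pattern on every platform. A minimal usage sketch; the message and the SYNC_RESULT field are invented, and journald only accepts field names made of uppercase letters, digits and underscores that do not start with an underscore:

package main

import (
	"fmt"

	"github.com/coreos/go-systemd/v22/journal"
)

func main() {
	if !journal.Enabled() {
		fmt.Println("journald socket not available; falling back to stdout")
		return
	}
	// Structured fields ride along with the message; MESSAGE and PRIORITY
	// are added by Send itself.
	err := journal.Send("cluster sync finished", journal.PriInfo, map[string]string{
		"SYNC_RESULT": "ok",
	})
	if err != nil {
		fmt.Println("send failed:", err)
	}
}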
+// +// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html +package journal + +import ( + "errors" +) + +func Enabled() bool { + return false +} + +func Send(message string, priority Priority, vars map[string]string) error { + return errors.New("could not initialize socket to journald") +} diff --git a/vendor/github.com/coreos/vcontext/path/path.go b/vendor/github.com/coreos/vcontext/path/path.go index 3daadc784be..58be8baa1ab 100644 --- a/vendor/github.com/coreos/vcontext/path/path.go +++ b/vendor/github.com/coreos/vcontext/path/path.go @@ -41,6 +41,10 @@ func (c ContextPath) String() string { return strings.Join(strs, ".") } +// Append returns a new ContextPath with the specified elements appended. +// The underlying array is sometimes reused, so if the original path might +// be used in future Append operations, the returned ContextPath should not +// be stored into a long-lived data structure. (Store a copy instead.) func (c ContextPath) Append(e ...interface{}) ContextPath { return ContextPath{ Path: append(c.Path, e...), @@ -48,6 +52,11 @@ func (c ContextPath) Append(e ...interface{}) ContextPath { } } +// Copy returns an identical ContextPath that shares no state with the +// original. When storing a ContextPath into a data structure, usually a +// copy should be stored instead of the original (for example, Report does +// this), since Append sometimes modifies the path's underlying array in +// place. func (c ContextPath) Copy() ContextPath { // make sure to preserve reflect.DeepEqual() equality var path []interface{} diff --git a/vendor/github.com/coreos/vcontext/report/report.go b/vendor/github.com/coreos/vcontext/report/report.go index 5378e8433c7..618bc7536fa 100644 --- a/vendor/github.com/coreos/vcontext/report/report.go +++ b/vendor/github.com/coreos/vcontext/report/report.go @@ -131,7 +131,7 @@ func (r *Report) AddOn(c path.ContextPath, err error, k EntryKind) { } r.Entries = append(r.Entries, Entry{ Message: err.Error(), - Context: c, + Context: c.Copy(), Kind: k, }) } diff --git a/vendor/github.com/exponent-io/jsonpath/.travis.yml b/vendor/github.com/exponent-io/jsonpath/.travis.yml index f4f458a416d..53bb8b3f952 100644 --- a/vendor/github.com/exponent-io/jsonpath/.travis.yml +++ b/vendor/github.com/exponent-io/jsonpath/.travis.yml @@ -1,5 +1,7 @@ language: go - +arch: + - amd64 + - ppc64le go: - - 1.5 + - 1.15 - tip diff --git a/vendor/github.com/exponent-io/jsonpath/decoder.go b/vendor/github.com/exponent-io/jsonpath/decoder.go index 31de46c7381..5e3a06548af 100644 --- a/vendor/github.com/exponent-io/jsonpath/decoder.go +++ b/vendor/github.com/exponent-io/jsonpath/decoder.go @@ -39,16 +39,15 @@ func NewDecoder(r io.Reader) *Decoder { // Decoder is intended to be used with a stream of tokens. As a result it navigates forward only. 
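The new comments on Append and Copy in vcontext/path describe a slice-aliasing hazard, and the report.go hunk stores c.Copy() for exactly that reason. A small sketch that makes the hazard visible; the path elements are made up, and the extra capacity is forced only so the overwrite is reproducible:

package main

import (
	"fmt"

	"github.com/coreos/vcontext/path"
)

func main() {
	base := path.ContextPath{Path: []interface{}{"spec", "config"}}
	// Give the backing array spare capacity so Append reuses it.
	base.Path = append(make([]interface{}, 0, 8), base.Path...)

	stored := base.Append("files")            // shares base's backing array
	storedCopy := base.Append("files").Copy() // independent backing array

	// A later Append from the same base overwrites the shared element.
	_ = base.Append("units")

	fmt.Println(stored.String())     // spec.config.units - the stored path changed
	fmt.Println(storedCopy.String()) // spec.config.files
}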
func (d *Decoder) SeekTo(path ...interface{}) (bool, error) { - if len(path) == 0 { - return len(d.path) == 0, nil - } - last := len(path) - 1 - if i, ok := path[last].(int); ok { - path[last] = i - 1 + if len(path) > 0 { + last := len(path) - 1 + if i, ok := path[last].(int); ok { + path[last] = i - 1 + } } for { - if d.path.Equal(path) { + if len(path) == len(d.path) && d.path.Equal(path) { return true, nil } _, err := d.Token() diff --git a/vendor/github.com/exponent-io/jsonpath/go.mod b/vendor/github.com/exponent-io/jsonpath/go.mod new file mode 100644 index 00000000000..0cb1f9486bd --- /dev/null +++ b/vendor/github.com/exponent-io/jsonpath/go.mod @@ -0,0 +1,5 @@ +module github.com/exponent-io/jsonpath + +go 1.15 + +require github.com/stretchr/testify v1.6.1 diff --git a/vendor/github.com/exponent-io/jsonpath/go.sum b/vendor/github.com/exponent-io/jsonpath/go.sum new file mode 100644 index 00000000000..56d62e7c224 --- /dev/null +++ b/vendor/github.com/exponent-io/jsonpath/go.sum @@ -0,0 +1,10 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/go-openapi/jsonpointer/.travis.yml b/vendor/github.com/go-openapi/jsonpointer/.travis.yml index 9aef9184e86..03a22fe06fd 100644 --- a/vendor/github.com/go-openapi/jsonpointer/.travis.yml +++ b/vendor/github.com/go-openapi/jsonpointer/.travis.yml @@ -1,8 +1,8 @@ after_success: - bash <(curl -s https://codecov.io/bash) go: -- 1.11.x -- 1.12.x +- 1.14.x +- 1.15.x install: - GO111MODULE=off go get -u gotest.tools/gotestsum env: diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go index b284eb77a62..7df9853def6 100644 --- a/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -114,16 +114,16 @@ func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.Nam rValue := reflect.Indirect(reflect.ValueOf(node)) kind := rValue.Kind() - switch kind { + if rValue.Type().Implements(jsonPointableType) { + r, err := node.(JSONPointable).JSONLookup(decodedToken) + if err != nil { + return nil, kind, err + } + return r, kind, nil + } + switch kind { case reflect.Struct: - if rValue.Type().Implements(jsonPointableType) { - r, err := node.(JSONPointable).JSONLookup(decodedToken) - if err != nil { - return nil, kind, err - } - return r, kind, nil - } nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { return nil, kind, fmt.Errorf("object has no field %q", decodedToken) @@ -161,17 +161,17 @@ func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.Nam func setSingleImpl(node, data 
interface{}, decodedToken string, nameProvider *swag.NameProvider) error { rValue := reflect.Indirect(reflect.ValueOf(node)) - switch rValue.Kind() { - case reflect.Struct: - if ns, ok := node.(JSONSetable); ok { // pointer impl - return ns.JSONSet(decodedToken, data) - } + if ns, ok := node.(JSONSetable); ok { // pointer impl + return ns.JSONSet(decodedToken, data) + } - if rValue.Type().Implements(jsonSetableType) { - return node.(JSONSetable).JSONSet(decodedToken, data) - } + if rValue.Type().Implements(jsonSetableType) { + return node.(JSONSetable).JSONSet(decodedToken, data) + } + switch rValue.Kind() { + case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { return fmt.Errorf("object has no field %q", decodedToken) @@ -270,22 +270,22 @@ func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) e rValue := reflect.Indirect(reflect.ValueOf(node)) kind := rValue.Kind() - switch kind { - - case reflect.Struct: - if rValue.Type().Implements(jsonPointableType) { - r, err := node.(JSONPointable).JSONLookup(decodedToken) - if err != nil { - return err - } - fld := reflect.ValueOf(r) - if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { - node = fld.Addr().Interface() - continue - } - node = r + if rValue.Type().Implements(jsonPointableType) { + r, err := node.(JSONPointable).JSONLookup(decodedToken) + if err != nil { + return err + } + fld := reflect.ValueOf(r) + if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { + node = fld.Addr().Interface() continue } + node = r + continue + } + + switch kind { + case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { return fmt.Errorf("object has no field %q", decodedToken) diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml new file mode 100644 index 00000000000..f9381aee541 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/.golangci.yml @@ -0,0 +1,41 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 30 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 4 +linters: + enable-all: true + disable: + - maligned + - lll + - gochecknoglobals + - godox + - gocognit + - whitespace + - wsl + - funlen + - gochecknoglobals + - gochecknoinits + - scopelint + - wrapcheck + - exhaustivestruct + - exhaustive + - nlreturn + - testpackage + - gci + - gofumpt + - goerr113 + - gomnd + - tparallel + - nestif + - godot + - errorlint diff --git a/vendor/github.com/go-openapi/jsonreference/.travis.yml b/vendor/github.com/go-openapi/jsonreference/.travis.yml index 40b90757d89..05482f4b90c 100644 --- a/vendor/github.com/go-openapi/jsonreference/.travis.yml +++ b/vendor/github.com/go-openapi/jsonreference/.travis.yml @@ -1,10 +1,19 @@ after_success: - bash <(curl -s https://codecov.io/bash) go: -- 1.11.x -- 1.12.x +- 1.14.x +- 1.x install: -- GO111MODULE=off go get -u gotest.tools/gotestsum +- go get gotest.tools/gotestsum +jobs: + include: + # include linting job, but only for latest go version and amd64 arch + - go: 1.x + arch: amd64 + install: + go get github.com/golangci/golangci-lint/cmd/golangci-lint + script: + - golangci-lint run --new-from-rev master env: - GO111MODULE=on language: go 
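In the jsonpointer hunks above, the JSONPointable and JSONSetable checks now run before the kind switch, so non-struct types that implement JSONLookup (map-backed types, for instance) are resolved through their own lookup method. A hedged sketch of the interface in use; the document type and pointer string are invented for illustration:

package main

import (
	"fmt"

	"github.com/go-openapi/jsonpointer"
)

// extensible is a non-struct type that controls its own lookups.
type extensible map[string]interface{}

// JSONLookup implements jsonpointer.JSONPointable.
func (e extensible) JSONLookup(token string) (interface{}, error) {
	v, ok := e[token]
	if !ok {
		return nil, fmt.Errorf("no key %q", token)
	}
	return v, nil
}

func main() {
	doc := extensible{"x-feature": "enabled"}

	p, err := jsonpointer.New("/x-feature")
	if err != nil {
		panic(err)
	}
	// Get resolves the token via JSONLookup even though the underlying
	// kind is a map rather than a struct.
	v, _, err := p.Get(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // enabled
}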
diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md index 66345f4c61f..b94753aa527 100644 --- a/vendor/github.com/go-openapi/jsonreference/README.md +++ b/vendor/github.com/go-openapi/jsonreference/README.md @@ -4,7 +4,7 @@ An implementation of JSON Reference - Go language ## Status -Work in progress ( 90% done ) +Feature complete. Stable API ## Dependencies https://github.com/go-openapi/jsonpointer diff --git a/vendor/github.com/go-openapi/jsonreference/go.mod b/vendor/github.com/go-openapi/jsonreference/go.mod index aff1d0163ec..e6c2ec4d929 100644 --- a/vendor/github.com/go-openapi/jsonreference/go.mod +++ b/vendor/github.com/go-openapi/jsonreference/go.mod @@ -6,7 +6,7 @@ require ( github.com/go-openapi/jsonpointer v0.19.3 github.com/stretchr/testify v1.3.0 golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 // indirect - golang.org/x/text v0.3.2 // indirect + golang.org/x/text v0.3.3 // indirect ) go 1.13 diff --git a/vendor/github.com/go-openapi/jsonreference/go.sum b/vendor/github.com/go-openapi/jsonreference/go.sum index c7ceab5802d..b37f873e553 100644 --- a/vendor/github.com/go-openapi/jsonreference/go.sum +++ b/vendor/github.com/go-openapi/jsonreference/go.sum @@ -5,12 +5,8 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= @@ -28,14 +24,12 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod 
h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml index 625c3d6affe..842ac1c095c 100644 --- a/vendor/github.com/go-openapi/swag/.golangci.yml +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -20,3 +20,22 @@ linters: - lll - gochecknoinits - gochecknoglobals + - nlreturn + - testpackage + - wrapcheck + - gomnd + - exhaustive + - exhaustivestruct + - goerr113 + - wsl + - whitespace + - gofumpt + - godot + - nestif + - godox + - funlen + - gci + - gocognit + - paralleltest + - thelper + - ifshort diff --git a/vendor/github.com/go-openapi/swag/.travis.yml b/vendor/github.com/go-openapi/swag/.travis.yml index aa26d8763aa..fc25a887286 100644 --- a/vendor/github.com/go-openapi/swag/.travis.yml +++ b/vendor/github.com/go-openapi/swag/.travis.yml @@ -1,12 +1,34 @@ after_success: - bash <(curl -s https://codecov.io/bash) go: -- 1.11.x -- 1.12.x +- 1.14.x +- 1.x +arch: +- amd64 +jobs: + include: + # include arch ppc, but only for latest go version - skip testing for race + - go: 1.x + arch: ppc64le + install: ~ + script: + - go test -v + + #- go: 1.x + # arch: arm + # install: ~ + # script: + # - go test -v + + # include linting job, but only for latest go version and amd64 arch + - go: 1.x + arch: amd64 + install: + go get github.com/golangci/golangci-lint/cmd/golangci-lint + script: + - golangci-lint run --new-from-rev master install: - GO111MODULE=off go get -u gotest.tools/gotestsum -env: -- GO111MODULE=on language: go notifications: slack: diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md index eb60ae80abe..217f6fa5054 100644 --- a/vendor/github.com/go-openapi/swag/README.md +++ b/vendor/github.com/go-openapi/swag/README.md @@ -2,7 +2,6 @@ [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/swag?status.svg)](http://godoc.org/github.com/go-openapi/swag) -[![GolangCI](https://golangci.com/badges/github.com/go-openapi/swag.svg)](https://golangci.com) [![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/swag)](https://goreportcard.com/report/github.com/go-openapi/swag) Contains a bunch of helper functions for go-openapi and go-swagger projects. 
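The go-openapi/swag convert_types.go hunk just below fills in the previously missing Uint16* and Float32* pointer helpers alongside the existing Int/Uint/Float64 families (and fixes the "pouinter"/"uinto" typos in the surrounding comments). A short usage sketch restricted to the functions added in that hunk; the values are illustrative:

package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// Pointer constructors, handy for optional API fields.
	port := swag.Uint16(8443)
	ratio := swag.Float32(0.75)

	// Nil-safe dereference: a nil pointer yields the zero value.
	fmt.Println(swag.Uint16Value(port), swag.Float32Value(nil)) // 8443 0

	// Conversions between value slices and pointer slices.
	ptrs := swag.Float32Slice([]float32{1.5, 2.5})
	fmt.Println(swag.Float32ValueSlice(ptrs)) // [1.5 2.5]

	// And between value maps and pointer maps.
	m := swag.Uint16Map(map[string]uint16{"https": 443})
	fmt.Println(swag.Uint16ValueMap(m)["https"], *ratio) // 443 0.75
}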
diff --git a/vendor/github.com/go-openapi/swag/convert.go b/vendor/github.com/go-openapi/swag/convert.go index 7da35c316e5..fc085aeb8ea 100644 --- a/vendor/github.com/go-openapi/swag/convert.go +++ b/vendor/github.com/go-openapi/swag/convert.go @@ -88,7 +88,7 @@ func ConvertFloat64(str string) (float64, error) { return strconv.ParseFloat(str, 64) } -// ConvertInt8 turn a string into int8 boolean +// ConvertInt8 turn a string into an int8 func ConvertInt8(str string) (int8, error) { i, err := strconv.ParseInt(str, 10, 8) if err != nil { @@ -97,7 +97,7 @@ func ConvertInt8(str string) (int8, error) { return int8(i), nil } -// ConvertInt16 turn a string into a int16 +// ConvertInt16 turn a string into an int16 func ConvertInt16(str string) (int16, error) { i, err := strconv.ParseInt(str, 10, 16) if err != nil { @@ -106,7 +106,7 @@ func ConvertInt16(str string) (int16, error) { return int16(i), nil } -// ConvertInt32 turn a string into a int32 +// ConvertInt32 turn a string into an int32 func ConvertInt32(str string) (int32, error) { i, err := strconv.ParseInt(str, 10, 32) if err != nil { @@ -115,12 +115,12 @@ func ConvertInt32(str string) (int32, error) { return int32(i), nil } -// ConvertInt64 turn a string into a int64 +// ConvertInt64 turn a string into an int64 func ConvertInt64(str string) (int64, error) { return strconv.ParseInt(str, 10, 64) } -// ConvertUint8 turn a string into a uint8 +// ConvertUint8 turn a string into an uint8 func ConvertUint8(str string) (uint8, error) { i, err := strconv.ParseUint(str, 10, 8) if err != nil { @@ -129,7 +129,7 @@ func ConvertUint8(str string) (uint8, error) { return uint8(i), nil } -// ConvertUint16 turn a string into a uint16 +// ConvertUint16 turn a string into an uint16 func ConvertUint16(str string) (uint16, error) { i, err := strconv.ParseUint(str, 10, 16) if err != nil { @@ -138,7 +138,7 @@ func ConvertUint16(str string) (uint16, error) { return uint16(i), nil } -// ConvertUint32 turn a string into a uint32 +// ConvertUint32 turn a string into an uint32 func ConvertUint32(str string) (uint32, error) { i, err := strconv.ParseUint(str, 10, 32) if err != nil { @@ -147,7 +147,7 @@ func ConvertUint32(str string) (uint32, error) { return uint32(i), nil } -// ConvertUint64 turn a string into a uint64 +// ConvertUint64 turn a string into an uint64 func ConvertUint64(str string) (uint64, error) { return strconv.ParseUint(str, 10, 64) } diff --git a/vendor/github.com/go-openapi/swag/convert_types.go b/vendor/github.com/go-openapi/swag/convert_types.go index c95e4e78bdd..c49cc473a8c 100644 --- a/vendor/github.com/go-openapi/swag/convert_types.go +++ b/vendor/github.com/go-openapi/swag/convert_types.go @@ -181,12 +181,12 @@ func IntValueMap(src map[string]*int) map[string]int { return dst } -// Int32 returns a pointer to of the int64 value passed in. +// Int32 returns a pointer to of the int32 value passed in. func Int32(v int32) *int32 { return &v } -// Int32Value returns the value of the int64 pointer passed in or +// Int32Value returns the value of the int32 pointer passed in or // 0 if the pointer is nil. 
func Int32Value(v *int32) int32 { if v != nil { @@ -195,7 +195,7 @@ func Int32Value(v *int32) int32 { return 0 } -// Int32Slice converts a slice of int64 values into a slice of +// Int32Slice converts a slice of int32 values into a slice of // int32 pointers func Int32Slice(src []int32) []*int32 { dst := make([]*int32, len(src)) @@ -299,13 +299,80 @@ func Int64ValueMap(src map[string]*int64) map[string]int64 { return dst } -// Uint returns a pouinter to of the uint value passed in. +// Uint16 returns a pointer to of the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return &v +} + +// Uint16Value returns the value of the uint16 pointer passed in or +// 0 if the pointer is nil. +func Uint16Value(v *uint16) uint16 { + if v != nil { + return *v + } + + return 0 +} + +// Uint16Slice converts a slice of uint16 values into a slice of +// uint16 pointers +func Uint16Slice(src []uint16) []*uint16 { + dst := make([]*uint16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + + return dst +} + +// Uint16ValueSlice converts a slice of uint16 pointers into a slice of +// uint16 values +func Uint16ValueSlice(src []*uint16) []uint16 { + dst := make([]uint16, len(src)) + + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + + return dst +} + +// Uint16Map converts a string map of uint16 values into a string +// map of uint16 pointers +func Uint16Map(src map[string]uint16) map[string]*uint16 { + dst := make(map[string]*uint16) + + for k, val := range src { + v := val + dst[k] = &v + } + + return dst +} + +// Uint16ValueMap converts a string map of uint16 pointers into a string +// map of uint16 values +func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { + dst := make(map[string]uint16) + + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + + return dst +} + +// Uint returns a pointer to of the uint value passed in. func Uint(v uint) *uint { return &v } -// UintValue returns the value of the uint pouinter passed in or -// 0 if the pouinter is nil. +// UintValue returns the value of the uint pointer passed in or +// 0 if the pointer is nil. 
func UintValue(v *uint) uint { if v != nil { return *v @@ -313,8 +380,8 @@ func UintValue(v *uint) uint { return 0 } -// UintSlice converts a slice of uint values uinto a slice of -// uint pouinters +// UintSlice converts a slice of uint values into a slice of +// uint pointers func UintSlice(src []uint) []*uint { dst := make([]*uint, len(src)) for i := 0; i < len(src); i++ { @@ -323,7 +390,7 @@ func UintSlice(src []uint) []*uint { return dst } -// UintValueSlice converts a slice of uint pouinters uinto a slice of +// UintValueSlice converts a slice of uint pointers into a slice of // uint values func UintValueSlice(src []*uint) []uint { dst := make([]uint, len(src)) @@ -335,8 +402,8 @@ func UintValueSlice(src []*uint) []uint { return dst } -// UintMap converts a string map of uint values uinto a string -// map of uint pouinters +// UintMap converts a string map of uint values into a string +// map of uint pointers func UintMap(src map[string]uint) map[string]*uint { dst := make(map[string]*uint) for k, val := range src { @@ -346,7 +413,7 @@ func UintMap(src map[string]uint) map[string]*uint { return dst } -// UintValueMap converts a string map of uint pouinters uinto a string +// UintValueMap converts a string map of uint pointers into a string // map of uint values func UintValueMap(src map[string]*uint) map[string]uint { dst := make(map[string]uint) @@ -358,13 +425,13 @@ func UintValueMap(src map[string]*uint) map[string]uint { return dst } -// Uint32 returns a pouinter to of the uint64 value passed in. +// Uint32 returns a pointer to of the uint32 value passed in. func Uint32(v uint32) *uint32 { return &v } -// Uint32Value returns the value of the uint64 pouinter passed in or -// 0 if the pouinter is nil. +// Uint32Value returns the value of the uint32 pointer passed in or +// 0 if the pointer is nil. func Uint32Value(v *uint32) uint32 { if v != nil { return *v @@ -372,8 +439,8 @@ func Uint32Value(v *uint32) uint32 { return 0 } -// Uint32Slice converts a slice of uint64 values uinto a slice of -// uint32 pouinters +// Uint32Slice converts a slice of uint32 values into a slice of +// uint32 pointers func Uint32Slice(src []uint32) []*uint32 { dst := make([]*uint32, len(src)) for i := 0; i < len(src); i++ { @@ -382,7 +449,7 @@ func Uint32Slice(src []uint32) []*uint32 { return dst } -// Uint32ValueSlice converts a slice of uint32 pouinters uinto a slice of +// Uint32ValueSlice converts a slice of uint32 pointers into a slice of // uint32 values func Uint32ValueSlice(src []*uint32) []uint32 { dst := make([]uint32, len(src)) @@ -394,8 +461,8 @@ func Uint32ValueSlice(src []*uint32) []uint32 { return dst } -// Uint32Map converts a string map of uint32 values uinto a string -// map of uint32 pouinters +// Uint32Map converts a string map of uint32 values into a string +// map of uint32 pointers func Uint32Map(src map[string]uint32) map[string]*uint32 { dst := make(map[string]*uint32) for k, val := range src { @@ -405,7 +472,7 @@ func Uint32Map(src map[string]uint32) map[string]*uint32 { return dst } -// Uint32ValueMap converts a string map of uint32 pouinters uinto a string +// Uint32ValueMap converts a string map of uint32 pointers into a string // map of uint32 values func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { dst := make(map[string]uint32) @@ -417,13 +484,13 @@ func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { return dst } -// Uint64 returns a pouinter to of the uint64 value passed in. +// Uint64 returns a pointer to of the uint64 value passed in. 
func Uint64(v uint64) *uint64 { return &v } -// Uint64Value returns the value of the uint64 pouinter passed in or -// 0 if the pouinter is nil. +// Uint64Value returns the value of the uint64 pointer passed in or +// 0 if the pointer is nil. func Uint64Value(v *uint64) uint64 { if v != nil { return *v @@ -431,8 +498,8 @@ func Uint64Value(v *uint64) uint64 { return 0 } -// Uint64Slice converts a slice of uint64 values uinto a slice of -// uint64 pouinters +// Uint64Slice converts a slice of uint64 values into a slice of +// uint64 pointers func Uint64Slice(src []uint64) []*uint64 { dst := make([]*uint64, len(src)) for i := 0; i < len(src); i++ { @@ -441,7 +508,7 @@ func Uint64Slice(src []uint64) []*uint64 { return dst } -// Uint64ValueSlice converts a slice of uint64 pouinters uinto a slice of +// Uint64ValueSlice converts a slice of uint64 pointers into a slice of // uint64 values func Uint64ValueSlice(src []*uint64) []uint64 { dst := make([]uint64, len(src)) @@ -453,8 +520,8 @@ func Uint64ValueSlice(src []*uint64) []uint64 { return dst } -// Uint64Map converts a string map of uint64 values uinto a string -// map of uint64 pouinters +// Uint64Map converts a string map of uint64 values into a string +// map of uint64 pointers func Uint64Map(src map[string]uint64) map[string]*uint64 { dst := make(map[string]*uint64) for k, val := range src { @@ -464,7 +531,7 @@ func Uint64Map(src map[string]uint64) map[string]*uint64 { return dst } -// Uint64ValueMap converts a string map of uint64 pouinters uinto a string +// Uint64ValueMap converts a string map of uint64 pointers into a string // map of uint64 values func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { dst := make(map[string]uint64) @@ -476,6 +543,74 @@ func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { return dst } +// Float32 returns a pointer to of the float32 value passed in. +func Float32(v float32) *float32 { + return &v +} + +// Float32Value returns the value of the float32 pointer passed in or +// 0 if the pointer is nil. +func Float32Value(v *float32) float32 { + if v != nil { + return *v + } + + return 0 +} + +// Float32Slice converts a slice of float32 values into a slice of +// float32 pointers +func Float32Slice(src []float32) []*float32 { + dst := make([]*float32, len(src)) + + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + + return dst +} + +// Float32ValueSlice converts a slice of float32 pointers into a slice of +// float32 values +func Float32ValueSlice(src []*float32) []float32 { + dst := make([]float32, len(src)) + + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + + return dst +} + +// Float32Map converts a string map of float32 values into a string +// map of float32 pointers +func Float32Map(src map[string]float32) map[string]*float32 { + dst := make(map[string]*float32) + + for k, val := range src { + v := val + dst[k] = &v + } + + return dst +} + +// Float32ValueMap converts a string map of float32 pointers into a string +// map of float32 values +func Float32ValueMap(src map[string]*float32) map[string]float32 { + dst := make(map[string]float32) + + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + + return dst +} + // Float64 returns a pointer to of the float64 value passed in. 
func Float64(v float64) *float64 { return &v diff --git a/vendor/github.com/go-openapi/swag/go.mod b/vendor/github.com/go-openapi/swag/go.mod index 15bbb082227..fb29b65b256 100644 --- a/vendor/github.com/go-openapi/swag/go.mod +++ b/vendor/github.com/go-openapi/swag/go.mod @@ -2,13 +2,17 @@ module github.com/go-openapi/swag require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/kr/pretty v0.1.0 // indirect - github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 - github.com/stretchr/testify v1.3.0 - gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect - gopkg.in/yaml.v2 v2.2.2 + github.com/kr/text v0.2.0 // indirect + github.com/mailru/easyjson v0.7.6 + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect + github.com/stretchr/testify v1.6.1 + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect + gopkg.in/yaml.v2 v2.4.0 + gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 // indirect ) replace github.com/golang/lint => golang.org/x/lint v0.0.0-20190409202823-959b441ac422 replace sourcegraph.com/sourcegraph/go-diff => github.com/sourcegraph/go-diff v0.5.1 + +go 1.11 diff --git a/vendor/github.com/go-openapi/swag/go.sum b/vendor/github.com/go-openapi/swag/go.sum index 33469f54acb..a45da809afa 100644 --- a/vendor/github.com/go-openapi/swag/go.sum +++ b/vendor/github.com/go-openapi/swag/go.sum @@ -1,20 +1,29 @@ +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= 
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/go-openapi/swag/json.go b/vendor/github.com/go-openapi/swag/json.go index edf93d84c60..7e9902ca314 100644 --- a/vendor/github.com/go-openapi/swag/json.go +++ b/vendor/github.com/go-openapi/swag/json.go @@ -51,7 +51,7 @@ type ejUnmarshaler interface { UnmarshalEasyJSON(w *jlexer.Lexer) } -// WriteJSON writes json data, prefers finding an appropriate interface to short-circuit the marshaller +// WriteJSON writes json data, prefers finding an appropriate interface to short-circuit the marshaler // so it takes the fastest option available. func WriteJSON(data interface{}) ([]byte, error) { if d, ok := data.(ejMarshaler); ok { @@ -65,8 +65,8 @@ func WriteJSON(data interface{}) ([]byte, error) { return json.Marshal(data) } -// ReadJSON reads json data, prefers finding an appropriate interface to short-circuit the unmarshaller -// so it takes the fastes option available +// ReadJSON reads json data, prefers finding an appropriate interface to short-circuit the unmarshaler +// so it takes the fastest option available func ReadJSON(data []byte, value interface{}) error { trimmedData := bytes.Trim(data, "\x00") if d, ok := value.(ejUnmarshaler); ok { @@ -189,7 +189,7 @@ func FromDynamicJSON(data, target interface{}) error { return json.Unmarshal(b, target) } -// NameProvider represents an object capabale of translating from go property names +// NameProvider represents an object capable of translating from go property names // to json property names // This type is thread-safe. 
type NameProvider struct { diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go index 70f4fb361c1..9a60409725e 100644 --- a/vendor/github.com/go-openapi/swag/loading.go +++ b/vendor/github.com/go-openapi/swag/loading.go @@ -19,7 +19,9 @@ import ( "io/ioutil" "log" "net/http" + "net/url" "path/filepath" + "runtime" "strings" "time" ) @@ -27,6 +29,15 @@ import ( // LoadHTTPTimeout the default timeout for load requests var LoadHTTPTimeout = 30 * time.Second +// LoadHTTPBasicAuthUsername the username to use when load requests require basic auth +var LoadHTTPBasicAuthUsername = "" + +// LoadHTTPBasicAuthPassword the password to use when load requests require basic auth +var LoadHTTPBasicAuthPassword = "" + +// LoadHTTPCustomHeaders an optional collection of custom HTTP headers for load requests +var LoadHTTPCustomHeaders = map[string]string{} + // LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in func LoadFromFileOrHTTP(path string) ([]byte, error) { return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) @@ -48,6 +59,26 @@ func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func( if err != nil { return nil, err } + + if strings.HasPrefix(pth, `file://`) { + if runtime.GOOS == "windows" { + // support for canonical file URIs on windows. + // Zero tolerance here for dodgy URIs. + u, _ := url.Parse(upth) + if u.Host != "" { + // assume UNC name (volume share) + // file://host/share/folder\... ==> \\host\share\path\folder + // NOTE: UNC port not yet supported + upth = strings.Join([]string{`\`, u.Host, u.Path}, `\`) + } else { + // file:///c:/folder/... ==> just remove the leading slash + upth = strings.TrimPrefix(upth, `file:///`) + } + } else { + upth = strings.TrimPrefix(upth, `file://`) + } + } + return local(filepath.FromSlash(upth)) } } @@ -55,10 +86,19 @@ func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func( func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) { return func(path string) ([]byte, error) { client := &http.Client{Timeout: timeout} - req, err := http.NewRequest("GET", path, nil) + req, err := http.NewRequest("GET", path, nil) // nolint: noctx if err != nil { return nil, err } + + if LoadHTTPBasicAuthUsername != "" && LoadHTTPBasicAuthPassword != "" { + req.SetBasicAuth(LoadHTTPBasicAuthUsername, LoadHTTPBasicAuthPassword) + } + + for key, val := range LoadHTTPCustomHeaders { + req.Header.Set(key, val) + } + resp, err := client.Do(req) defer func() { if resp != nil { diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go index 9eac16afb2e..193702f2cec 100644 --- a/vendor/github.com/go-openapi/swag/util.go +++ b/vendor/github.com/go-openapi/swag/util.go @@ -31,7 +31,7 @@ var isInitialism func(string) bool // GoNamePrefixFunc sets an optional rule to prefix go names // which do not start with a letter. // -// e.g. to help converting "123" into "{prefix}123" +// e.g. 
to help convert "123" into "{prefix}123" // // The default is to prefix with "X" var GoNamePrefixFunc func(string) string @@ -91,7 +91,7 @@ func init() { } const ( - //collectionFormatComma = "csv" + // collectionFormatComma = "csv" collectionFormatSpace = "ssv" collectionFormatTab = "tsv" collectionFormatPipe = "pipes" @@ -370,7 +370,7 @@ func IsZero(data interface{}) bool { // AddInitialisms add additional initialisms func AddInitialisms(words ...string) { for _, word := range words { - //commonInitialisms[upper(word)] = true + // commonInitialisms[upper(word)] = true commonInitialisms.add(upper(word)) } // sort again diff --git a/vendor/github.com/go-playground/validator/v10/.travis.yml b/vendor/github.com/go-playground/validator/v10/.travis.yml deleted file mode 100644 index 85a7be34b11..00000000000 --- a/vendor/github.com/go-playground/validator/v10/.travis.yml +++ /dev/null @@ -1,29 +0,0 @@ -language: go -go: - - 1.15.2 - - tip -matrix: - allow_failures: - - go: tip - -notifications: - email: - recipients: dean.karn@gmail.com - on_success: change - on_failure: always - -before_install: - - go install github.com/mattn/goveralls - - mkdir -p $GOPATH/src/gopkg.in - - ln -s $GOPATH/src/github.com/$TRAVIS_REPO_SLUG $GOPATH/src/gopkg.in/validator.v9 - -# Only clone the most recent commit. -git: - depth: 1 - -script: - - go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./... - -after_success: | - [ $TRAVIS_GO_VERSION = 1.15.2 ] && - goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/github.com/go-playground/validator/v10/README.md b/vendor/github.com/go-playground/validator/v10/README.md index 04fbb3c8699..2ba36d73f1e 100644 --- a/vendor/github.com/go-playground/validator/v10/README.md +++ b/vendor/github.com/go-playground/validator/v10/README.md @@ -1,7 +1,7 @@ Package validator ================ [![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -![Project status](https://img.shields.io/badge/version-10.4.1-green.svg) +![Project status](https://img.shields.io/badge/version-10.5.0-green.svg) [![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator) [![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master) [![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator) @@ -27,11 +27,11 @@ Installation Use go get. - go get github.com/go-playground/validator/v10 + go get github.com/go-playground/validator Then import the validator package into your own code. - import "github.com/go-playground/validator/v10" + import "github.com/go-playground/validator" Error Return Value ------- @@ -53,7 +53,7 @@ validationErrors := err.(validator.ValidationErrors) Usage and documentation ------ -Please see https://godoc.org/github.com/go-playground/validator for detailed usage docs. +Please see https://pkg.go.dev/github.com/go-playground/validator/v10 for detailed usage docs. 
##### Examples: diff --git a/vendor/github.com/go-playground/validator/v10/baked_in.go b/vendor/github.com/go-playground/validator/v10/baked_in.go index 6ce762d152a..dd2570596f8 100644 --- a/vendor/github.com/go-playground/validator/v10/baked_in.go +++ b/vendor/github.com/go-playground/validator/v10/baked_in.go @@ -18,6 +18,7 @@ import ( "unicode/utf8" "golang.org/x/crypto/sha3" + "golang.org/x/text/language" urn "github.com/leodido/go-urn" ) @@ -188,6 +189,7 @@ var ( "iso3166_1_alpha2": isIso3166Alpha2, "iso3166_1_alpha3": isIso3166Alpha3, "iso3166_1_alpha_numeric": isIso3166AlphaNumeric, + "bcp47_language_tag": isBCP47LanguageTag, } ) @@ -791,6 +793,9 @@ func isNeField(fl FieldLevel) bool { case reflect.Slice, reflect.Map, reflect.Array: return int64(field.Len()) != int64(currentField.Len()) + case reflect.Bool: + return field.Bool() != currentField.Bool() + case reflect.Struct: fieldType := field.Type() @@ -1544,7 +1549,7 @@ func requiredWithout(fl FieldLevel) bool { return true } -// RequiredWithoutAll is the validation function +// ExcludedWithoutAll is the validation function // The field under validation must not be present or is empty when all of the other specified fields are not present. func excludedWithoutAll(fl FieldLevel) bool { params := parseOneOfParam2(fl.Param()) @@ -2283,3 +2288,15 @@ func isIso3166AlphaNumeric(fl FieldLevel) bool { } return iso3166_1_alpha_numeric[code] } + +// isBCP47LanguageTag is the validation function for validating if the current field's value is a valid BCP 47 language tag, as parsed by language.Parse +func isBCP47LanguageTag(fl FieldLevel) bool { + field := fl.Field() + + if field.Kind() == reflect.String { + _, err := language.Parse(field.String()) + return err == nil + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} diff --git a/vendor/github.com/go-playground/validator/v10/doc.go b/vendor/github.com/go-playground/validator/v10/doc.go index a816c20a418..207d9879aca 100644 --- a/vendor/github.com/go-playground/validator/v10/doc.go +++ b/vendor/github.com/go-playground/validator/v10/doc.go @@ -1221,6 +1221,13 @@ see: https://www.iso.org/iso-3166-country-codes.html Usage: iso3166_1_alpha3 +BCP 47 Language Tag + +This validates that a string value is a valid BCP 47 language tag, as parsed by language.Parse. +More information on https://pkg.go.dev/golang.org/x/text/language + + Usage: bcp47_language_tag + TimeZone This validates that a string value is a valid time zone based on the time zone database present on the system. @@ -1228,7 +1235,7 @@ Although empty value and Local value are allowed by time.LoadLocation golang fun More information on https://golang.org/pkg/time/#LoadLocation Usage: timezone - + Alias Validators and Tags diff --git a/vendor/github.com/go-playground/validator/v10/errors.go b/vendor/github.com/go-playground/validator/v10/errors.go index 63293cf9edc..f6e72da7d21 100644 --- a/vendor/github.com/go-playground/validator/v10/errors.go +++ b/vendor/github.com/go-playground/validator/v10/errors.go @@ -146,7 +146,7 @@ type FieldError interface { // Type returns the Field's reflect Type // - // // eg. time.Time's type is time.Time + // eg. 
time.Time's type is time.Time Type() reflect.Type // returns the FieldError's translated error diff --git a/vendor/github.com/go-playground/validator/v10/go.mod b/vendor/github.com/go-playground/validator/v10/go.mod index d457100ea5d..53c4820cd2c 100644 --- a/vendor/github.com/go-playground/validator/v10/go.mod +++ b/vendor/github.com/go-playground/validator/v10/go.mod @@ -8,4 +8,5 @@ require ( github.com/go-playground/universal-translator v0.17.0 github.com/leodido/go-urn v1.2.0 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 + golang.org/x/text v0.3.2 // indirect ) diff --git a/vendor/github.com/go-playground/validator/v10/go.sum b/vendor/github.com/go-playground/validator/v10/go.sum index 01526427378..4b00cf6675b 100644 --- a/vendor/github.com/go-playground/validator/v10/go.sum +++ b/vendor/github.com/go-playground/validator/v10/go.sum @@ -10,17 +10,22 @@ github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e h1:FDhOuMEY4JVRztM/gsbk+IKUQ8kj74bxZrgw87eMMVc= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/go-playground/validator/v10/validator.go b/vendor/github.com/go-playground/validator/v10/validator.go index f097f394296..9569c0dd505 100644 --- a/vendor/github.com/go-playground/validator/v10/validator.go +++ b/vendor/github.com/go-playground/validator/v10/validator.go @@ -74,7 +74,7 @@ func (v *validate) validateStruct(ctx context.Context, parent reflect.Value, cur } } - v.traverseField(ctx, parent, current.Field(f.idx), ns, structNs, f, f.cTags) + 
v.traverseField(ctx, current, current.Field(f.idx), ns, structNs, f, f.cTags) } } @@ -222,7 +222,7 @@ func (v *validate) traverseField(ctx context.Context, parent reflect.Value, curr structNs = append(append(structNs, cf.name...), '.') } - v.validateStruct(ctx, current, current, typ, ns, structNs, ct) + v.validateStruct(ctx, parent, current, typ, ns, structNs, ct) return } } diff --git a/vendor/github.com/go-playground/validator/v10/validator_instance.go b/vendor/github.com/go-playground/validator/v10/validator_instance.go index fe6a48775f8..d230a3248d9 100644 --- a/vendor/github.com/go-playground/validator/v10/validator_instance.go +++ b/vendor/github.com/go-playground/validator/v10/validator_instance.go @@ -29,6 +29,10 @@ const ( requiredWithAllTag = "required_with_all" requiredIfTag = "required_if" requiredUnlessTag = "required_unless" + excludedWithoutAllTag = "excluded_without_all" + excludedWithoutTag = "excluded_without" + excludedWithTag = "excluded_with" + excludedWithAllTag = "excluded_with_all" skipValidationTag = "-" diveTag = "dive" keysTag = "keys" @@ -111,7 +115,8 @@ func New() *Validate { switch k { // these require that even if the value is nil that the validation should run, omitempty still overrides this behaviour - case requiredIfTag, requiredUnlessTag, requiredWithTag, requiredWithAllTag, requiredWithoutTag, requiredWithoutAllTag: + case requiredIfTag, requiredUnlessTag, requiredWithTag, requiredWithAllTag, requiredWithoutTag, requiredWithoutAllTag, + excludedWithTag, excludedWithAllTag, excludedWithoutTag, excludedWithoutAllTag: _ = v.registerValidation(k, wrapFunc(val), true, true) default: // no need to error check here, baked in will always be valid @@ -409,7 +414,10 @@ func (v *Validate) StructPartialCtx(ctx context.Context, s interface{}, fields . if len(flds) > 0 { vd.misc = append(vd.misc[0:0], name...) - vd.misc = append(vd.misc, '.') + // Don't append empty name for unnamed structs + if len(vd.misc) != 0 { + vd.misc = append(vd.misc, '.') + } for _, s := range flds { diff --git a/vendor/github.com/gofrs/uuid/README.md b/vendor/github.com/gofrs/uuid/README.md index efc3204fc0f..2685a832e38 100644 --- a/vendor/github.com/gofrs/uuid/README.md +++ b/vendor/github.com/gofrs/uuid/README.md @@ -12,7 +12,6 @@ and parsing of UUIDs in different formats. This package supports the following UUID versions: * Version 1, based on timestamp and MAC address (RFC-4122) -* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1) * Version 3, based on MD5 hashing of a named value (RFC-4122) * Version 4, based on random numbers (RFC-4122) * Version 5, based on SHA-1 hashing of a named value (RFC-4122) diff --git a/vendor/github.com/gofrs/uuid/generator.go b/vendor/github.com/gofrs/uuid/generator.go index 4257761f154..2783d9e7787 100644 --- a/vendor/github.com/gofrs/uuid/generator.go +++ b/vendor/github.com/gofrs/uuid/generator.go @@ -30,7 +30,6 @@ import ( "hash" "io" "net" - "os" "sync" "time" ) @@ -47,21 +46,11 @@ type HWAddrFunc func() (net.HardwareAddr, error) // DefaultGenerator is the default UUID Generator used by this package. var DefaultGenerator Generator = NewGen() -var ( - posixUID = uint32(os.Getuid()) - posixGID = uint32(os.Getgid()) -) - // NewV1 returns a UUID based on the current timestamp and MAC address. func NewV1() (UUID, error) { return DefaultGenerator.NewV1() } -// NewV2 returns a DCE Security UUID based on the POSIX UID/GID. 
-func NewV2(domain byte) (UUID, error) { - return DefaultGenerator.NewV2(domain) -} - // NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name. func NewV3(ns UUID, name string) UUID { return DefaultGenerator.NewV3(ns, name) @@ -80,7 +69,6 @@ func NewV5(ns UUID, name string) UUID { // Generator provides an interface for generating UUIDs. type Generator interface { NewV1() (UUID, error) - NewV2(domain byte) (UUID, error) NewV3(ns UUID, name string) UUID NewV4() (UUID, error) NewV5(ns UUID, name string) UUID @@ -164,28 +152,6 @@ func (g *Gen) NewV1() (UUID, error) { return u, nil } -// NewV2 returns a DCE Security UUID based on the POSIX UID/GID. -func (g *Gen) NewV2(domain byte) (UUID, error) { - u, err := g.NewV1() - if err != nil { - return Nil, err - } - - switch domain { - case DomainPerson: - binary.BigEndian.PutUint32(u[:], posixUID) - case DomainGroup: - binary.BigEndian.PutUint32(u[:], posixGID) - } - - u[9] = domain - - u.SetVersion(V2) - u.SetVariant(VariantRFC4122) - - return u, nil -} - // NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name. func (g *Gen) NewV3(ns UUID, name string) UUID { u := newFromHash(md5.New(), ns, name) @@ -216,7 +182,7 @@ func (g *Gen) NewV5(ns UUID, name string) UUID { return u } -// Returns the epoch and clock sequence. +// getClockSequence returns the epoch and clock sequence. func (g *Gen) getClockSequence() (uint64, uint16, error) { var err error g.clockSequenceOnce.Do(func() { diff --git a/vendor/github.com/gofrs/uuid/uuid.go b/vendor/github.com/gofrs/uuid/uuid.go index 9c4547f17f4..78aed6e2539 100644 --- a/vendor/github.com/gofrs/uuid/uuid.go +++ b/vendor/github.com/gofrs/uuid/uuid.go @@ -19,11 +19,19 @@ // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -// Package uuid provides implementations of the Universally Unique Identifier (UUID), as specified in RFC-4122 and DCE 1.1. +// Package uuid provides implementations of the Universally Unique Identifier +// (UUID), as specified in RFC-4122, // // RFC-4122[1] provides the specification for versions 1, 3, 4, and 5. // -// DCE 1.1[2] provides the specification for version 2. +// DCE 1.1[2] provides the specification for version 2, but version 2 support +// was removed from this package in v4 due to some concerns with the +// specification itself. Reading the spec, it seems that it would result in +// generating UUIDs that aren't very unique. In having read the spec it seemed +// that our implementation did not meet the spec. It also seems to be at-odds +// with RFC 4122, meaning we would need quite a bit of special code to support +// it. Lastly, there were no Version 2 implementations that we could find to +// ensure we were understanding the specification correctly. // // [1] https://tools.ietf.org/html/rfc4122 // [2] http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01 @@ -48,7 +56,7 @@ type UUID [Size]byte const ( _ byte = iota V1 // Version 1 (date-time and MAC address) - V2 // Version 2 (date-time and MAC address, DCE security version) + _ // Version 2 (date-time and MAC address, DCE security version) [removed] V3 // Version 3 (namespace name-based) V4 // Version 4 (random) V5 // Version 5 (namespace name-based) @@ -70,8 +78,8 @@ const ( ) // Timestamp is the count of 100-nanosecond intervals since 00:00:00.00, -// 15 October 1582 within a V1 UUID. This type has no meaning for V2-V5 -// UUIDs since they don't have an embedded timestamp. 
+// 15 October 1582 within a V1 UUID. This type has no meaning for other +// UUID versions since they don't have an embedded timestamp. type Timestamp uint64 const _100nsPerSecond = 10000000 diff --git a/vendor/github.com/golang/mock/gomock/call.go b/vendor/github.com/golang/mock/gomock/call.go index 7345f6540f6..b18cc2d6147 100644 --- a/vendor/github.com/golang/mock/gomock/call.go +++ b/vendor/github.com/golang/mock/gomock/call.go @@ -63,6 +63,9 @@ func newCall(t TestHelper, receiver interface{}, method string, methodType refle } } + // callerInfo's skip should be updated if the number of calls between the user's test + // and this line changes, i.e. this code is wrapped in another anonymous function. + // 0 is us, 1 is RecordCallWithMethodType(), 2 is the generated recorder, and 3 is the user's test. origin := callerInfo(3) actions := []func([]interface{}) []interface{}{func([]interface{}) []interface{} { // Synthesize the zero value for each of the return args' types. @@ -301,14 +304,9 @@ func (c *Call) matches(args []interface{}) error { for i, m := range c.args { if !m.Matches(args[i]) { - got := fmt.Sprintf("%v", args[i]) - if gs, ok := m.(GotFormatter); ok { - got = gs.Got(args[i]) - } - return fmt.Errorf( "expected call at %s doesn't match the argument at index %d.\nGot: %v\nWant: %v", - c.origin, i, got, m, + c.origin, i, formatGottenArg(m, args[i]), m, ) } } @@ -331,7 +329,7 @@ func (c *Call) matches(args []interface{}) error { // Non-variadic args if !m.Matches(args[i]) { return fmt.Errorf("expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v", - c.origin, strconv.Itoa(i), args[i], m) + c.origin, strconv.Itoa(i), formatGottenArg(m, args[i]), m) } continue } @@ -373,9 +371,9 @@ func (c *Call) matches(args []interface{}) error { // Got Foo(a, b, c, d) want Foo(matcherA, matcherB, matcherC, matcherD, matcherE) // Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, matcherC, matcherD) // Got Foo(a, b, c) want Foo(matcherA, matcherB) - return fmt.Errorf("Expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v", - c.origin, strconv.Itoa(i), args[i:], c.args[i]) + return fmt.Errorf("expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v", + c.origin, strconv.Itoa(i), formatGottenArg(m, args[i:]), c.args[i]) } } @@ -425,3 +423,11 @@ func setSlice(arg interface{}, v reflect.Value) { func (c *Call) addAction(action func([]interface{}) []interface{}) { c.actions = append(c.actions, action) } + +func formatGottenArg(m Matcher, arg interface{}) string { + got := fmt.Sprintf("%v", arg) + if gs, ok := m.(GotFormatter); ok { + got = gs.Got(arg) + } + return got +} diff --git a/vendor/github.com/golang/mock/gomock/callset.go b/vendor/github.com/golang/mock/gomock/callset.go index b046b525e96..e4e85d602f8 100644 --- a/vendor/github.com/golang/mock/gomock/callset.go +++ b/vendor/github.com/golang/mock/gomock/callset.go @@ -84,7 +84,11 @@ func (cs callSet) FindMatch(receiver interface{}, method string, args []interfac for _, call := range exhausted { if err := call.matches(args); err != nil { _, _ = fmt.Fprintf(&callsErrors, "\n%v", err) + continue } + _, _ = fmt.Fprintf( + &callsErrors, "all expected calls for method %q have been exhausted", method, + ) } if len(expected)+len(exhausted) == 0 { diff --git a/vendor/github.com/golang/mock/gomock/controller.go b/vendor/github.com/golang/mock/gomock/controller.go index d7c3c656ae0..3b6569091b2 100644 --- a/vendor/github.com/golang/mock/gomock/controller.go +++ 
b/vendor/github.com/golang/mock/gomock/controller.go @@ -50,9 +50,6 @@ // mockObj.EXPECT().SomeMethod(2, "second"), // mockObj.EXPECT().SomeMethod(3, "third"), // ) -// -// TODO: -// - Handle different argument/return types (e.g. ..., chan, map, interface). package gomock import ( @@ -77,6 +74,15 @@ type TestHelper interface { Helper() } +// cleanuper is used to check if TestHelper also has the `Cleanup` method. A +// common pattern is to pass in a `*testing.T` to +// `NewController(t TestReporter)`. In Go 1.14+, `*testing.T` has a cleanup +// method. This can be utilized to call `Finish()` so the caller of this library +// does not have to. +type cleanuper interface { + Cleanup(func()) +} + // A Controller represents the top-level control of a mock ecosystem. It // defines the scope and lifetime of mock objects, as well as their // expectations. It is safe to call Controller's methods from multiple @@ -115,29 +121,43 @@ type Controller struct { // NewController returns a new Controller. It is the preferred way to create a // Controller. +// +// New in go1.14+, if you are passing a *testing.T into this function you no +// longer need to call ctrl.Finish() in your test methods func NewController(t TestReporter) *Controller { h, ok := t.(TestHelper) if !ok { - h = nopTestHelper{t} + h = &nopTestHelper{t} } - - return &Controller{ + ctrl := &Controller{ T: h, expectedCalls: newCallSet(), } + if c, ok := isCleanuper(ctrl.T); ok { + c.Cleanup(func() { + ctrl.T.Helper() + ctrl.finish(true, nil) + }) + } + + return ctrl } type cancelReporter struct { - TestHelper + t TestHelper cancel func() } func (r *cancelReporter) Errorf(format string, args ...interface{}) { - r.TestHelper.Errorf(format, args...) + r.t.Errorf(format, args...) } func (r *cancelReporter) Fatalf(format string, args ...interface{}) { defer r.cancel() - r.TestHelper.Fatalf(format, args...) + r.t.Fatalf(format, args...) +} + +func (r *cancelReporter) Helper() { + r.t.Helper() } // WithContext returns a new Controller and a Context, which is cancelled on any @@ -145,15 +165,22 @@ func (r *cancelReporter) Fatalf(format string, args ...interface{}) { func WithContext(ctx context.Context, t TestReporter) (*Controller, context.Context) { h, ok := t.(TestHelper) if !ok { - h = nopTestHelper{t} + h = &nopTestHelper{t: t} } ctx, cancel := context.WithCancel(ctx) - return NewController(&cancelReporter{h, cancel}), ctx + return NewController(&cancelReporter{t: h, cancel: cancel}), ctx } type nopTestHelper struct { - TestReporter + t TestReporter +} + +func (h *nopTestHelper) Errorf(format string, args ...interface{}) { + h.t.Errorf(format, args...) +} +func (h *nopTestHelper) Fatalf(format string, args ...interface{}) { + h.t.Fatalf(format, args...) } func (h nopTestHelper) Helper() {} @@ -197,7 +224,10 @@ func (ctrl *Controller) Call(receiver interface{}, method string, args ...interf expected, err := ctrl.expectedCalls.FindMatch(receiver, method, args) if err != nil { - origin := callerInfo(2) + // callerInfo's skip should be updated if the number of calls between the user's test + // and this line changes, i.e. this code is wrapped in another anonymous function. + // 0 is us, 1 is controller.Call(), 2 is the generated mock, and 3 is the user's test. + origin := callerInfo(3) ctrl.T.Fatalf("Unexpected call to %T.%v(%v) at %s because: %s", receiver, method, args, origin, err) } @@ -230,20 +260,29 @@ func (ctrl *Controller) Call(receiver interface{}, method string, args ...interf // were called. It should be invoked for each Controller. 
It is not idempotent // and therefore can only be invoked once. func (ctrl *Controller) Finish() { + // If we're currently panicking, probably because this is a deferred call. + // This must be recovered in the deferred function. + err := recover() + ctrl.finish(false, err) +} + +func (ctrl *Controller) finish(cleanup bool, panicErr interface{}) { ctrl.T.Helper() ctrl.mu.Lock() defer ctrl.mu.Unlock() if ctrl.finished { - ctrl.T.Fatalf("Controller.Finish was called more than once. It has to be called exactly once.") + if _, ok := isCleanuper(ctrl.T); !ok { + ctrl.T.Fatalf("Controller.Finish was called more than once. It has to be called exactly once.") + } + return } ctrl.finished = true - // If we're currently panicking, probably because this is a deferred call, - // pass through the panic. - if err := recover(); err != nil { - panic(err) + // Short-circuit, pass through the panic. + if panicErr != nil { + panic(panicErr) } // Check that all remaining expected calls are satisfied. @@ -252,13 +291,43 @@ func (ctrl *Controller) Finish() { ctrl.T.Errorf("missing call(s) to %v", call) } if len(failures) != 0 { - ctrl.T.Fatalf("aborting test due to missing call(s)") + if !cleanup { + ctrl.T.Fatalf("aborting test due to missing call(s)") + return + } + ctrl.T.Errorf("aborting test due to missing call(s)") } } +// callerInfo returns the file:line of the call site. skip is the number +// of stack frames to skip when reporting. 0 is callerInfo's call site. func callerInfo(skip int) string { if _, file, line, ok := runtime.Caller(skip + 1); ok { return fmt.Sprintf("%s:%d", file, line) } return "unknown file" } + +// isCleanuper checks it if t's base TestReporter has a Cleanup method. +func isCleanuper(t TestReporter) (cleanuper, bool) { + tr := unwrapTestReporter(t) + c, ok := tr.(cleanuper) + return c, ok +} + +// unwrapTestReporter unwraps TestReporter to the base implementation. 
+func unwrapTestReporter(t TestReporter) TestReporter { + tr := t + switch nt := t.(type) { + case *cancelReporter: + tr = nt.t + if h, check := tr.(*nopTestHelper); check { + tr = h.t + } + case *nopTestHelper: + tr = nt.t + default: + // not wrapped + } + return tr +} diff --git a/vendor/github.com/golang/mock/gomock/matchers.go b/vendor/github.com/golang/mock/gomock/matchers.go index 7bfc07be43f..770aba5a310 100644 --- a/vendor/github.com/golang/mock/gomock/matchers.go +++ b/vendor/github.com/golang/mock/gomock/matchers.go @@ -102,7 +102,21 @@ type eqMatcher struct { } func (e eqMatcher) Matches(x interface{}) bool { - return reflect.DeepEqual(e.x, x) + // In case, some value is nil + if e.x == nil || x == nil { + return reflect.DeepEqual(e.x, x) + } + + // Check if types assignable and convert them to common type + x1Val := reflect.ValueOf(e.x) + x2Val := reflect.ValueOf(x) + + if x1Val.Type().AssignableTo(x2Val.Type()) { + x1ValConverted := x1Val.Convert(x2Val.Type()) + return reflect.DeepEqual(x1ValConverted.Interface(), x2Val.Interface()) + } + + return false } func (e eqMatcher) String() string { @@ -245,7 +259,7 @@ func Not(x interface{}) Matcher { // AssignableToTypeOf(s).Matches(time.Second) // returns true // AssignableToTypeOf(s).Matches(99) // returns false // -// var ctx = reflect.TypeOf((*context.Context)).Elem() +// var ctx = reflect.TypeOf((*context.Context)(nil)).Elem() // AssignableToTypeOf(ctx).Matches(context.Background()) // returns true func AssignableToTypeOf(x interface{}) Matcher { if xt, ok := x.(reflect.Type); ok { diff --git a/vendor/github.com/golang/mock/mockgen/mockgen.go b/vendor/github.com/golang/mock/mockgen/mockgen.go index 685a7cb7496..f816b093c64 100644 --- a/vendor/github.com/golang/mock/mockgen/mockgen.go +++ b/vendor/github.com/golang/mock/mockgen/mockgen.go @@ -23,8 +23,6 @@ import ( "encoding/json" "flag" "fmt" - "go/build" - "go/format" "go/token" "io" "io/ioutil" @@ -39,6 +37,8 @@ import ( "unicode" "github.com/golang/mock/mockgen/model" + + toolsimports "golang.org/x/tools/imports" ) const ( @@ -132,15 +132,14 @@ func main() { // "package.X" since "package" is this package). This can happen if the mock // is output into an already existing package. 
outputPackagePath := *selfPackage - if len(outputPackagePath) == 0 && len(*destination) > 0 { - dst, _ := filepath.Abs(filepath.Dir(*destination)) - for _, prefix := range build.Default.SrcDirs() { - if strings.HasPrefix(dst, prefix) { - if rel, err := filepath.Rel(prefix, dst); err == nil { - outputPackagePath = rel - break - } - } + if outputPackagePath == "" && *destination != "" { + dstPath, err := filepath.Abs(filepath.Dir(*destination)) + if err != nil { + log.Fatalf("Unable to determine destination file path: %v", err) + } + outputPackagePath, err = parsePackageImport(dstPath) + if err != nil { + log.Fatalf("Unable to determine destination file path: %v", err) } } @@ -151,6 +150,7 @@ func main() { g.srcPackage = packageName g.srcInterfaces = flag.Arg(1) } + g.destination = *destination if *mockNames != "" { g.mockNames = parseMockNames(*mockNames) @@ -210,6 +210,7 @@ type generator struct { indent string mockNames map[string]string // may be empty filename string // may be empty + destination string // may be empty srcPackage, srcInterfaces string // may be empty copyrightHeader string @@ -327,7 +328,7 @@ func (g *generator) Generate(pkg *model.Package, outputPkgName string, outputPac } // Avoid importing package if source pkg == output pkg - if pth == pkg.PkgPath && outputPkgName == pkg.Name { + if pth == pkg.PkgPath && outputPackagePath == pkg.PkgPath { continue } @@ -376,7 +377,7 @@ func (g *generator) GenerateMockInterface(intf *model.Interface, outputPackagePa mockType := g.mockName(intf.Name) g.p("") - g.p("// %v is a mock of %v interface", mockType, intf.Name) + g.p("// %v is a mock of %v interface.", mockType, intf.Name) g.p("type %v struct {", mockType) g.in() g.p("ctrl *gomock.Controller") @@ -385,7 +386,7 @@ func (g *generator) GenerateMockInterface(intf *model.Interface, outputPackagePa g.p("}") g.p("") - g.p("// %vMockRecorder is the mock recorder for %v", mockType, mockType) + g.p("// %vMockRecorder is the mock recorder for %v.", mockType, mockType) g.p("type %vMockRecorder struct {", mockType) g.in() g.p("mock *%v", mockType) @@ -398,7 +399,7 @@ func (g *generator) GenerateMockInterface(intf *model.Interface, outputPackagePa // g.p("var _ %v = (*%v)(nil)", typeName, mockType) // g.p("") - g.p("// New%v creates a new mock instance", mockType) + g.p("// New%v creates a new mock instance.", mockType) g.p("func New%v(ctrl *gomock.Controller) *%v {", mockType, mockType) g.in() g.p("mock := &%v{ctrl: ctrl}", mockType) @@ -409,7 +410,7 @@ func (g *generator) GenerateMockInterface(intf *model.Interface, outputPackagePa g.p("") // XXX: possible name collision here if someone has EXPECT in their interface. 
- g.p("// EXPECT returns an object that allows the caller to indicate expected use") + g.p("// EXPECT returns an object that allows the caller to indicate expected use.") g.p("func (m *%v) EXPECT() *%vMockRecorder {", mockType, mockType) g.in() g.p("return m.recorder") @@ -421,7 +422,14 @@ func (g *generator) GenerateMockInterface(intf *model.Interface, outputPackagePa return nil } +type byMethodName []*model.Method + +func (b byMethodName) Len() int { return len(b) } +func (b byMethodName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byMethodName) Less(i, j int) bool { return b[i].Name < b[j].Name } + func (g *generator) GenerateMockMethods(mockType string, intf *model.Interface, pkgOverride string) { + sort.Sort(byMethodName(intf.Methods)) for _, m := range intf.Methods { g.p("") _ = g.GenerateMockMethod(mockType, m, pkgOverride) @@ -465,7 +473,7 @@ func (g *generator) GenerateMockMethod(mockType string, m *model.Method, pkgOver ia := newIdentifierAllocator(argNames) idRecv := ia.allocateIdentifier("m") - g.p("// %v mocks base method", m.Name) + g.p("// %v mocks base method.", m.Name) g.p("func (%v *%v) %v(%v)%v {", idRecv, mockType, m.Name, argString, retString) g.in() g.p("%s.ctrl.T.Helper()", idRecv) @@ -533,7 +541,7 @@ func (g *generator) GenerateMockRecorderMethod(mockType string, m *model.Method) ia := newIdentifierAllocator(argNames) idRecv := ia.allocateIdentifier("mr") - g.p("// %v indicates an expected call of %v", m.Name, m.Name) + g.p("// %v indicates an expected call of %v.", m.Name, m.Name) g.p("func (%s *%vMockRecorder) %v(%v) *gomock.Call {", idRecv, mockType, m.Name, argString) g.in() g.p("%s.mock.ctrl.T.Helper()", idRecv) @@ -617,7 +625,7 @@ func (o identifierAllocator) allocateIdentifier(want string) string { // Output returns the generator's output, formatted in the standard Go style. func (g *generator) Output() []byte { - src, err := format.Source(g.buf.Bytes()) + src, err := toolsimports.Process(g.destination, g.buf.Bytes(), nil) if err != nil { log.Fatalf("Failed to format generated source code: %s\n%s", err, g.buf.String()) } diff --git a/vendor/github.com/golang/mock/mockgen/model/model.go b/vendor/github.com/golang/mock/mockgen/model/model.go index 88783cfdf6e..d06d5162282 100644 --- a/vendor/github.com/golang/mock/mockgen/model/model.go +++ b/vendor/github.com/golang/mock/mockgen/model/model.go @@ -71,6 +71,16 @@ func (intf *Interface) addImports(im map[string]bool) { } } +// AddMethod adds a new method, deduplicating by method name. +func (intf *Interface) AddMethod(m *Method) { + for _, me := range intf.Methods { + if me.Name == m.Name { + return + } + } + intf.Methods = append(intf.Methods, m) +} + // Method is a single method of an interface. type Method struct { Name string @@ -311,7 +321,7 @@ func InterfaceFromInterfaceType(it reflect.Type) (*Interface, error) { return nil, err } - intf.Methods = append(intf.Methods, m) + intf.AddMethod(m) } return intf, nil @@ -468,3 +478,19 @@ func impPath(imp string) string { } return imp } + +// ErrorInterface represent built-in error interface. 
+var ErrorInterface = Interface{ + Name: "error", + Methods: []*Method{ + { + Name: "Error", + Out: []*Parameter{ + { + Name: "", + Type: PredeclaredType("string"), + }, + }, + }, + }, +} diff --git a/vendor/github.com/golang/mock/mockgen/parse.go b/vendor/github.com/golang/mock/mockgen/parse.go index a8edde805e0..ef8a95953fa 100644 --- a/vendor/github.com/golang/mock/mockgen/parse.go +++ b/vendor/github.com/golang/mock/mockgen/parse.go @@ -26,13 +26,14 @@ import ( "go/token" "io/ioutil" "log" + "os" "path" "path/filepath" "strconv" "strings" "github.com/golang/mock/mockgen/model" - "golang.org/x/tools/go/packages" + "golang.org/x/mod/modfile" ) var ( @@ -49,7 +50,7 @@ func sourceMode(source string) (*model.Package, error) { return nil, fmt.Errorf("failed getting source directory: %v", err) } - packageImport, err := parsePackageImport(source, srcDir) + packageImport, err := parsePackageImport(srcDir) if err != nil { return nil, err } @@ -62,7 +63,7 @@ func sourceMode(source string) (*model.Package, error) { p := &fileParser{ fileSet: fs, - imports: make(map[string]string), + imports: make(map[string]importedPackage), importedInterfaces: make(map[string]map[string]*ast.InterfaceType), auxInterfaces: make(map[string]map[string]*ast.InterfaceType), srcDir: srcDir, @@ -79,7 +80,7 @@ func sourceMode(source string) (*model.Package, error) { dotImports[v] = true } else { // TODO: Catch dupes? - p.imports[k] = v + p.imports[k] = importedPkg{path: v} } } } @@ -100,9 +101,39 @@ func sourceMode(source string) (*model.Package, error) { return pkg, nil } +type importedPackage interface { + Path() string + Parser() *fileParser +} + +type importedPkg struct { + path string + parser *fileParser +} + +func (i importedPkg) Path() string { return i.path } +func (i importedPkg) Parser() *fileParser { return i.parser } + +// duplicateImport is a bit of a misnomer. Currently the parser can't +// handle cases of multi-file packages importing different packages +// under the same name. Often these imports would not be problematic, +// so this type lets us defer raising an error unless the package name +// is actually used. +type duplicateImport struct { + name string + duplicates []string +} + +func (d duplicateImport) Error() string { + return fmt.Sprintf("%q is ambigous because of duplicate imports: %v", d.name, d.duplicates) +} + +func (d duplicateImport) Path() string { log.Fatal(d.Error()); return "" } +func (d duplicateImport) Parser() *fileParser { log.Fatal(d.Error()); return nil } + type fileParser struct { fileSet *token.FileSet - imports map[string]string // package name => import path + imports map[string]importedPackage // package name => imported package importedInterfaces map[string]map[string]*ast.InterfaceType // package (or "") => name => interface auxFiles []*ast.File @@ -154,18 +185,18 @@ func (p *fileParser) addAuxInterfacesFromFile(pkg string, file *ast.File) { func (p *fileParser) parseFile(importPath string, file *ast.File) (*model.Package, error) { allImports, dotImports := importsOfFile(file) // Don't stomp imports provided by -imports. Those should take precedence. - for pkg, pkgPath := range allImports { + for pkg, pkgI := range allImports { if _, ok := p.imports[pkg]; !ok { - p.imports[pkg] = pkgPath + p.imports[pkg] = pkgI } } // Add imports from auxiliary files, which might be needed for embedded interfaces. // Don't stomp any other imports. 
for _, f := range p.auxFiles { auxImports, _ := importsOfFile(f) - for pkg, pkgPath := range auxImports { + for pkg, pkgI := range auxImports { if _, ok := p.imports[pkg]; !ok { - p.imports[pkg] = pkgPath + p.imports[pkg] = pkgI } } } @@ -186,31 +217,38 @@ func (p *fileParser) parseFile(importPath string, file *ast.File) (*model.Packag }, nil } -// parsePackage loads package specified by path, parses it and populates -// corresponding imports and importedInterfaces into the fileParser. -func (p *fileParser) parsePackage(path string) error { +// parsePackage loads package specified by path, parses it and returns +// a new fileParser with the parsed imports and interfaces. +func (p *fileParser) parsePackage(path string) (*fileParser, error) { + newP := &fileParser{ + fileSet: token.NewFileSet(), + imports: make(map[string]importedPackage), + importedInterfaces: make(map[string]map[string]*ast.InterfaceType), + auxInterfaces: make(map[string]map[string]*ast.InterfaceType), + srcDir: p.srcDir, + } + var pkgs map[string]*ast.Package - if imp, err := build.Import(path, p.srcDir, build.FindOnly); err != nil { - return err - } else if pkgs, err = parser.ParseDir(p.fileSet, imp.Dir, nil, 0); err != nil { - return err + if imp, err := build.Import(path, newP.srcDir, build.FindOnly); err != nil { + return nil, err + } else if pkgs, err = parser.ParseDir(newP.fileSet, imp.Dir, nil, 0); err != nil { + return nil, err } + for _, pkg := range pkgs { file := ast.MergePackageFiles(pkg, ast.FilterFuncDuplicates|ast.FilterUnassociatedComments|ast.FilterImportDuplicates) - if _, ok := p.importedInterfaces[path]; !ok { - p.importedInterfaces[path] = make(map[string]*ast.InterfaceType) + if _, ok := newP.importedInterfaces[path]; !ok { + newP.importedInterfaces[path] = make(map[string]*ast.InterfaceType) } for ni := range iterInterfaces(file) { - p.importedInterfaces[path][ni.name.Name] = ni.it + newP.importedInterfaces[path][ni.name.Name] = ni.it } imports, _ := importsOfFile(file) - for pkgName, pkgPath := range imports { - if _, ok := p.imports[pkgName]; !ok { - p.imports[pkgName] = pkgPath - } + for pkgName, pkgI := range imports { + newP.imports[pkgName] = pkgI } } - return nil + return newP, nil } func (p *fileParser) parseInterface(name, pkg string, it *ast.InterfaceType) (*model.Interface, error) { @@ -229,22 +267,33 @@ func (p *fileParser) parseInterface(name, pkg string, it *ast.InterfaceType) (*m if err != nil { return nil, err } - intf.Methods = append(intf.Methods, m) + intf.AddMethod(m) case *ast.Ident: // Embedded interface in this package. ei := p.auxInterfaces[pkg][v.String()] if ei == nil { - if ei = p.importedInterfaces[pkg][v.String()]; ei == nil { + ei = p.importedInterfaces[pkg][v.String()] + } + + var eintf *model.Interface + if ei != nil { + var err error + eintf, err = p.parseInterface(v.String(), pkg, ei) + if err != nil { + return nil, err + } + } else { + // This is built-in error interface. + if v.String() == model.ErrorInterface.Name { + eintf = &model.ErrorInterface + } else { return nil, p.errorf(v.Pos(), "unknown embedded interface %s", v.String()) } } - eintf, err := p.parseInterface(v.String(), pkg, ei) - if err != nil { - return nil, err - } // Copy the methods. - // TODO: apply shadowing rules. - intf.Methods = append(intf.Methods, eintf.Methods...) + for _, m := range eintf.Methods { + intf.AddMethod(m) + } case *ast.SelectorExpr: // Embedded interface in another package. 
fpkg, sel := v.X.(*ast.Ident).String(), v.Sel.String() @@ -252,25 +301,42 @@ func (p *fileParser) parseInterface(name, pkg string, it *ast.InterfaceType) (*m if !ok { return nil, p.errorf(v.X.Pos(), "unknown package %s", fpkg) } + + var eintf *model.Interface + var err error ei := p.auxInterfaces[fpkg][sel] - if ei == nil { - fpkg = epkg - if _, ok = p.importedInterfaces[epkg]; !ok { - if err := p.parsePackage(epkg); err != nil { - return nil, p.errorf(v.Pos(), "could not parse package %s: %v", fpkg, err) + if ei != nil { + eintf, err = p.parseInterface(sel, fpkg, ei) + if err != nil { + return nil, err + } + } else { + path := epkg.Path() + parser := epkg.Parser() + if parser == nil { + ip, err := p.parsePackage(path) + if err != nil { + return nil, p.errorf(v.Pos(), "could not parse package %s: %v", path, err) + } + parser = ip + p.imports[fpkg] = importedPkg{ + path: epkg.Path(), + parser: parser, } } - if ei = p.importedInterfaces[epkg][sel]; ei == nil { - return nil, p.errorf(v.Pos(), "unknown embedded interface %s.%s", fpkg, sel) + if ei = parser.importedInterfaces[path][sel]; ei == nil { + return nil, p.errorf(v.Pos(), "unknown embedded interface %s.%s", path, sel) + } + eintf, err = parser.parseInterface(sel, path, ei) + if err != nil { + return nil, err } - } - eintf, err := p.parseInterface(sel, fpkg, ei) - if err != nil { - return nil, err } // Copy the methods. // TODO: apply shadowing rules. - intf.Methods = append(intf.Methods, eintf.Methods...) + for _, m := range eintf.Methods { + intf.AddMethod(m) + } default: return nil, fmt.Errorf("don't know how to mock method of type %T", field.Type) } @@ -383,7 +449,7 @@ func (p *fileParser) parseType(pkg string, typ ast.Expr) (model.Type, error) { // if so, patch the import w/ the fully qualified import maybeImportedPkg, ok := p.imports[pkg] if ok { - pkg = maybeImportedPkg + pkg = maybeImportedPkg.Path() } // assume type in this package return &model.NamedType{Package: pkg, Type: v.Name}, nil @@ -412,7 +478,7 @@ func (p *fileParser) parseType(pkg string, typ ast.Expr) (model.Type, error) { if !ok { return nil, p.errorf(v.Pos(), "unknown package %q", pkgName) } - return &model.NamedType{Package: pkg, Type: v.Sel.String()}, nil + return &model.NamedType{Package: pkg.Path(), Type: v.Sel.String()}, nil case *ast.StarExpr: t, err := p.parseType(pkg, v.X) if err != nil { @@ -424,6 +490,8 @@ func (p *fileParser) parseType(pkg string, typ ast.Expr) (model.Type, error) { return nil, p.errorf(v.Pos(), "can't handle non-empty unnamed struct types") } return model.PredeclaredType("struct{}"), nil + case *ast.ParenExpr: + return p.parseType(pkg, v.X) } return nil, fmt.Errorf("don't know how to parse type %T", typ) @@ -431,7 +499,7 @@ func (p *fileParser) parseType(pkg string, typ ast.Expr) (model.Type, error) { // importsOfFile returns a map of package name to import path // of the imports in file. 
-func importsOfFile(file *ast.File) (normalImports map[string]string, dotImports []string) { +func importsOfFile(file *ast.File) (normalImports map[string]importedPackage, dotImports []string) { var importPaths []string for _, is := range file.Imports { if is.Name != nil { @@ -441,7 +509,7 @@ func importsOfFile(file *ast.File) (normalImports map[string]string, dotImports importPaths = append(importPaths, importPath) } packagesName := createPackageMap(importPaths) - normalImports = make(map[string]string) + normalImports = make(map[string]importedPackage) dotImports = make([]string, 0) for _, is := range file.Imports { var pkgName string @@ -469,11 +537,22 @@ func importsOfFile(file *ast.File) (normalImports map[string]string, dotImports if pkgName == "." { dotImports = append(dotImports, importPath) } else { - - if _, ok := normalImports[pkgName]; ok { - log.Fatalf("imported package collision: %q imported twice", pkgName) + if pkg, ok := normalImports[pkgName]; ok { + switch p := pkg.(type) { + case duplicateImport: + normalImports[pkgName] = duplicateImport{ + name: p.name, + duplicates: append([]string{importPath}, p.duplicates...), + } + case importedPkg: + normalImports[pkgName] = duplicateImport{ + name: pkgName, + duplicates: []string{p.path, importPath}, + } + } + } else { + normalImports[pkgName] = importedPkg{path: importPath} } - normalImports[pkgName] = importPath } } return @@ -539,27 +618,52 @@ func packageNameOfDir(srcDir string) (string, error) { return "", fmt.Errorf("go source file not found %s", srcDir) } - packageImport, err := parsePackageImport(goFilePath, srcDir) + packageImport, err := parsePackageImport(srcDir) if err != nil { return "", err } return packageImport, nil } +var errOutsideGoPath = errors.New("Source directory is outside GOPATH") + // parseImportPackage get package import path via source file -func parsePackageImport(source, srcDir string) (string, error) { - cfg := &packages.Config{Mode: packages.LoadFiles, Tests: true, Dir: srcDir} - pkgs, err := packages.Load(cfg, "file="+source) - if err != nil { - return "", err +// an alternative implementation is to use: +// cfg := &packages.Config{Mode: packages.NeedName, Tests: true, Dir: srcDir} +// pkgs, err := packages.Load(cfg, "file="+source) +// However, it will call "go list" and slow down the performance +func parsePackageImport(srcDir string) (string, error) { + moduleMode := os.Getenv("GO111MODULE") + // trying to find the module + if moduleMode != "off" { + currentDir := srcDir + for { + dat, err := ioutil.ReadFile(filepath.Join(currentDir, "go.mod")) + if os.IsNotExist(err) { + if currentDir == filepath.Dir(currentDir) { + // at the root + break + } + currentDir = filepath.Dir(currentDir) + continue + } else if err != nil { + return "", err + } + modulePath := modfile.ModulePath(dat) + return filepath.ToSlash(filepath.Join(modulePath, strings.TrimPrefix(srcDir, currentDir))), nil + } } - if packages.PrintErrors(pkgs) > 0 || len(pkgs) == 0 { - return "", errors.New("loading package failed") + // fall back to GOPATH mode + goPaths := os.Getenv("GOPATH") + if goPaths == "" { + return "", fmt.Errorf("GOPATH is not set") } - - packageImport := pkgs[0].PkgPath - - // It is illegal to import a _test package. 
- packageImport = strings.TrimSuffix(packageImport, "_test") - return packageImport, nil + goPathList := strings.Split(goPaths, string(os.PathListSeparator)) + for _, goPath := range goPathList { + sourceRoot := filepath.Join(goPath, "src") + string(os.PathSeparator) + if strings.HasPrefix(srcDir, sourceRoot) { + return filepath.ToSlash(strings.TrimPrefix(srcDir, sourceRoot)), nil + } + } + return "", errOutsideGoPath } diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go index 1e7ff642057..066b4323b49 100644 --- a/vendor/github.com/golang/protobuf/proto/registry.go +++ b/vendor/github.com/golang/protobuf/proto/registry.go @@ -13,6 +13,7 @@ import ( "strings" "sync" + "google.golang.org/protobuf/reflect/protodesc" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/runtime/protoimpl" @@ -62,14 +63,7 @@ func FileDescriptor(s filePath) fileDescGZIP { // Find the descriptor in the v2 registry. var b []byte if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { - if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok { - b = fd.ProtoLegacyRawDesc() - } else { - // TODO: Use protodesc.ToFileDescriptorProto to construct - // a descriptorpb.FileDescriptorProto and marshal it. - // However, doing so causes the proto package to have a dependency - // on descriptorpb, leading to cyclic dependency issues. - } + b, _ = Marshal(protodesc.ToFileDescriptorProto(fd)) } // Locally cache the raw descriptor form for the file. diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index e729dcff13c..85f9f57365f 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -19,6 +19,8 @@ const urlPrefix = "type.googleapis.com/" // AnyMessageName returns the message name contained in an anypb.Any message. // Most type assertions should use the Is function instead. +// +// Deprecated: Call the any.MessageName method instead. func AnyMessageName(any *anypb.Any) (string, error) { name, err := anyMessageName(any) return string(name), err @@ -38,6 +40,8 @@ func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { } // MarshalAny marshals the given message m into an anypb.Any message. +// +// Deprecated: Call the anypb.New function instead. func MarshalAny(m proto.Message) (*anypb.Any, error) { switch dm := m.(type) { case DynamicAny: @@ -58,6 +62,9 @@ func MarshalAny(m proto.Message) (*anypb.Any, error) { // Empty returns a new message of the type specified in an anypb.Any message. // It returns protoregistry.NotFound if the corresponding message type could not // be resolved in the global registry. +// +// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead +// to resolve the message name and create a new instance of it. func Empty(any *anypb.Any) (proto.Message, error) { name, err := anyMessageName(any) if err != nil { @@ -76,6 +83,8 @@ func Empty(any *anypb.Any) (proto.Message, error) { // // The target message m may be a *DynamicAny message. If the underlying message // type could not be resolved, then this returns protoregistry.NotFound. +// +// Deprecated: Call the any.UnmarshalTo method instead. 
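The parsePackageImport rewrite above drops the go/packages ("go list") round trip: in module mode it walks up from the source directory until it finds a go.mod, reads the module path with golang.org/x/mod/modfile, and appends the remaining relative path; otherwise it trims a GOPATH/src prefix. A rough standalone illustration of the module-mode branch, with hypothetical paths and simplified error handling:

package main

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
	"strings"

	"golang.org/x/mod/modfile"
)

// importPathFor mirrors the module-mode branch of parsePackageImport: find the
// enclosing go.mod, then join the declared module path with srcDir's path
// relative to that module root. Error handling is simplified for the sketch.
func importPathFor(srcDir string) (string, error) {
	dir := srcDir
	for {
		if data, err := ioutil.ReadFile(filepath.Join(dir, "go.mod")); err == nil {
			rel := strings.TrimPrefix(srcDir, dir)
			return filepath.ToSlash(filepath.Join(modfile.ModulePath(data), rel)), nil
		}
		parent := filepath.Dir(dir)
		if parent == dir { // reached the filesystem root without finding go.mod
			return "", fmt.Errorf("no go.mod found above %s", srcDir)
		}
		dir = parent
	}
}

func main() {
	// Hypothetical layout: /src/mymod/go.mod declares "module example.com/mymod",
	// so this prints "example.com/mymod/internal/widgets".
	fmt.Println(importPathFor("/src/mymod/internal/widgets"))
}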
func UnmarshalAny(any *anypb.Any, m proto.Message) error { if dm, ok := m.(*DynamicAny); ok { if dm.Message == nil { @@ -100,6 +109,8 @@ func UnmarshalAny(any *anypb.Any, m proto.Message) error { } // Is reports whether the Any message contains a message of the specified type. +// +// Deprecated: Call the any.MessageIs method instead. func Is(any *anypb.Any, m proto.Message) bool { if any == nil || m == nil { return false @@ -119,6 +130,9 @@ func Is(any *anypb.Any, m proto.Message) bool { // var x ptypes.DynamicAny // if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } // fmt.Printf("unmarshaled message: %v", x.Message) +// +// Deprecated: Use the any.UnmarshalNew method instead to unmarshal +// the any message contents into a new instance of the underlying message. type DynamicAny struct{ proto.Message } func (m DynamicAny) String() string { diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go index fb9edd5c627..d3c33259d28 100644 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -3,4 +3,8 @@ // license that can be found in the LICENSE file. // Package ptypes provides functionality for interacting with well-known types. +// +// Deprecated: Well-known types have specialized functionality directly +// injected into the generated packages for each message type. +// See the deprecation notice for each function for the suggested alternative. package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go index 6110ae8a41d..b2b55dd851f 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -21,6 +21,8 @@ const ( // Duration converts a durationpb.Duration to a time.Duration. // Duration returns an error if dur is invalid or overflows a time.Duration. +// +// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead. func Duration(dur *durationpb.Duration) (time.Duration, error) { if err := validateDuration(dur); err != nil { return 0, err @@ -39,6 +41,8 @@ func Duration(dur *durationpb.Duration) (time.Duration, error) { } // DurationProto converts a time.Duration to a durationpb.Duration. +// +// Deprecated: Call the durationpb.New function instead. func DurationProto(d time.Duration) *durationpb.Duration { nanos := d.Nanoseconds() secs := nanos / 1e9 diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go index 026d0d49155..8368a3f70d3 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -33,6 +33,8 @@ const ( // // A nil Timestamp returns an error. The first return value in that case is // undefined. +// +// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead. func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { // Don't return the zero value on error, because corresponds to a valid // timestamp. Instead return whatever time.Unix gives us. @@ -46,6 +48,8 @@ func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { } // TimestampNow returns a google.protobuf.Timestamp for the current time. +// +// Deprecated: Call the timestamppb.Now function instead. 
func TimestampNow() *timestamppb.Timestamp { ts, err := TimestampProto(time.Now()) if err != nil { @@ -56,6 +60,8 @@ func TimestampNow() *timestamppb.Timestamp { // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. // It returns an error if the resulting Timestamp is invalid. +// +// Deprecated: Call the timestamppb.New function instead. func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { ts := &timestamppb.Timestamp{ Seconds: t.Unix(), @@ -69,6 +75,9 @@ func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { // TimestampString returns the RFC 3339 string for valid Timestamps. // For invalid Timestamps, it returns an error message in parentheses. +// +// Deprecated: Call the ts.AsTime method instead, +// followed by a call to the Format method on the time.Time value. func TimestampString(ts *timestamppb.Timestamp) string { t, err := Timestamp(ts) if err != nil { diff --git a/vendor/github.com/klauspost/compress/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore similarity index 100% rename from vendor/github.com/klauspost/compress/snappy/.gitignore rename to vendor/github.com/golang/snappy/.gitignore diff --git a/vendor/github.com/klauspost/compress/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS similarity index 90% rename from vendor/github.com/klauspost/compress/snappy/AUTHORS rename to vendor/github.com/golang/snappy/AUTHORS index bcfa19520af..203e84ebab0 100644 --- a/vendor/github.com/klauspost/compress/snappy/AUTHORS +++ b/vendor/github.com/golang/snappy/AUTHORS @@ -8,8 +8,10 @@ # Please keep the list sorted. +Amazon.com, Inc Damian Gryski Google Inc. Jan Mercl <0xjnml@gmail.com> +Klaus Post Rodolfo Carvalho Sebastien Binet diff --git a/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS similarity index 95% rename from vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS rename to vendor/github.com/golang/snappy/CONTRIBUTORS index 931ae31606f..d9914732b5f 100644 --- a/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS +++ b/vendor/github.com/golang/snappy/CONTRIBUTORS @@ -28,7 +28,9 @@ Damian Gryski Jan Mercl <0xjnml@gmail.com> +Jonathan Swinney Kai Backman +Klaus Post Marc-Antoine Ruel Nigel Tao Rob Pike diff --git a/vendor/github.com/klauspost/compress/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE similarity index 100% rename from vendor/github.com/klauspost/compress/snappy/LICENSE rename to vendor/github.com/golang/snappy/LICENSE diff --git a/vendor/github.com/klauspost/compress/snappy/README b/vendor/github.com/golang/snappy/README similarity index 100% rename from vendor/github.com/klauspost/compress/snappy/README rename to vendor/github.com/golang/snappy/README diff --git a/vendor/github.com/klauspost/compress/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go similarity index 97% rename from vendor/github.com/klauspost/compress/snappy/decode.go rename to vendor/github.com/golang/snappy/decode.go index 72efb0353dd..f1e04b172c5 100644 --- a/vendor/github.com/klauspost/compress/snappy/decode.go +++ b/vendor/github.com/golang/snappy/decode.go @@ -52,6 +52,8 @@ const ( // Otherwise, a newly allocated slice will be returned. // // The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format.
func Decode(dst, src []byte) ([]byte, error) { dLen, s, err := decodedLen(src) if err != nil { @@ -83,6 +85,8 @@ func NewReader(r io.Reader) *Reader { } // Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. type Reader struct { r io.Reader err error diff --git a/vendor/github.com/klauspost/compress/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s similarity index 98% rename from vendor/github.com/klauspost/compress/snappy/decode_amd64.s rename to vendor/github.com/golang/snappy/decode_amd64.s index 1c66e37234d..e6179f65e35 100644 --- a/vendor/github.com/klauspost/compress/snappy/decode_amd64.s +++ b/vendor/github.com/golang/snappy/decode_amd64.s @@ -184,7 +184,9 @@ tagLit60Plus: // checks. In the asm version, we code it once instead of once per switch case. ADDQ CX, SI SUBQ $58, SI - CMPQ SI, R13 + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 JA errCorrupt // case x == 60: @@ -230,7 +232,9 @@ tagCopy4: ADDQ $5, SI // if uint(s) > uint(len(src)) { etc } - CMPQ SI, R13 + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 JA errCorrupt // length = 1 + int(src[s-5])>>2 @@ -247,7 +251,9 @@ tagCopy2: ADDQ $3, SI // if uint(s) > uint(len(src)) { etc } - CMPQ SI, R13 + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 JA errCorrupt // length = 1 + int(src[s-3])>>2 @@ -271,7 +277,9 @@ tagCopy: ADDQ $2, SI // if uint(s) > uint(len(src)) { etc } - CMPQ SI, R13 + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 JA errCorrupt // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) diff --git a/vendor/github.com/golang/snappy/decode_arm64.s b/vendor/github.com/golang/snappy/decode_arm64.s new file mode 100644 index 00000000000..7a3ead17eac --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_arm64.s @@ -0,0 +1,494 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - R2 scratch +// - R3 scratch +// - R4 length or x +// - R5 offset +// - R6 &src[s] +// - R7 &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly R7 - R8, and len(dst)-d is R10 - R7. +// The s variable is implicitly R6 - R11, and len(src)-s is R13 - R6. +TEXT ·decode(SB), NOSPLIT, $56-56 + // Initialize R6, R7 and R8-R13. + MOVD dst_base+0(FP), R8 + MOVD dst_len+8(FP), R9 + MOVD R8, R7 + MOVD R8, R10 + ADD R9, R10, R10 + MOVD src_base+24(FP), R11 + MOVD src_len+32(FP), R12 + MOVD R11, R6 + MOVD R11, R13 + ADD R12, R13, R13 + +loop: + // for s < len(src) + CMP R13, R6 + BEQ end + + // R4 = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBU (R6), R4 + MOVW R4, R3 + ANDW $3, R3 + MOVW $1, R1 + CMPW R1, R3 + BGE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. 
+ + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + MOVW $60, R1 + LSRW $2, R4, R4 + CMPW R4, R1 + BLS tagLit60Plus + + // case x < 60: + // s++ + ADD $1, R6, R6 + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that R4 == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // R4 can hold 64 bits, so the increment cannot overflow. + ADD $1, R4, R4 + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // R2 = len(dst) - d + // R3 = len(src) - s + MOVD R10, R2 + SUB R7, R2, R2 + MOVD R13, R3 + SUB R6, R3, R3 + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMP $16, R4 + BGT callMemmove + CMP $16, R2 + BLT callMemmove + CMP $16, R3 + BLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + LDP 0(R6), (R14, R15) + STP (R14, R15), 0(R7) + + // d += length + // s += length + ADD R4, R7, R7 + ADD R4, R6, R6 + B loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMP R2, R4 + BGT errCorrupt + CMP R3, R4 + BGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // R7, R6 and R4 as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVD R7, 8(RSP) + MOVD R6, 16(RSP) + MOVD R4, 24(RSP) + MOVD R7, 32(RSP) + MOVD R6, 40(RSP) + MOVD R4, 48(RSP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVD 32(RSP), R7 + MOVD 40(RSP), R6 + MOVD 48(RSP), R4 + MOVD dst_base+0(FP), R8 + MOVD dst_len+8(FP), R9 + MOVD R8, R10 + ADD R9, R10, R10 + MOVD src_base+24(FP), R11 + MOVD src_len+32(FP), R12 + MOVD R11, R13 + ADD R12, R13, R13 + + // d += length + // s += length + ADD R4, R7, R7 + ADD R4, R6, R6 + B loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. 
+ ADD R4, R6, R6 + SUB $58, R6, R6 + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt + + // case x == 60: + MOVW $61, R1 + CMPW R1, R4 + BEQ tagLit61 + BGT tagLit62Plus + + // x = uint32(src[s-1]) + MOVBU -1(R6), R4 + B doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVHU -2(R6), R4 + B doLit + +tagLit62Plus: + CMPW $62, R4 + BHI tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVHU -3(R6), R4 + MOVBU -1(R6), R3 + ORR R3<<16, R4 + B doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVWU -4(R6), R4 + B doLit + + // The code above handles literal tags. + // ---------------------------------------- + // The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADD $5, R6, R6 + + // if uint(s) > uint(len(src)) { etc } + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt + + // length = 1 + int(src[s-5])>>2 + MOVD $1, R1 + ADD R4>>2, R1, R4 + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVWU -4(R6), R5 + B doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADD $3, R6, R6 + + // if uint(s) > uint(len(src)) { etc } + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt + + // length = 1 + int(src[s-3])>>2 + MOVD $1, R1 + ADD R4>>2, R1, R4 + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVHU -2(R6), R5 + B doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - R3 == src[s] & 0x03 + // - R4 == src[s] + CMP $2, R3 + BEQ tagCopy2 + BGT tagCopy4 + + // case tagCopy1: + // s += 2 + ADD $2, R6, R6 + + // if uint(s) > uint(len(src)) { etc } + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVD R4, R5 + AND $0xe0, R5 + MOVBU -1(R6), R3 + ORR R5<<3, R3, R5 + + // length = 4 + int(src[s-2])>>2&0x7 + MOVD $7, R1 + AND R4>>2, R1, R4 + ADD $4, R4, R4 + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - R4 == length && R4 > 0 + // - R5 == offset + + // if offset <= 0 { etc } + MOVD $0, R1 + CMP R1, R5 + BLE errCorrupt + + // if d < offset { etc } + MOVD R7, R3 + SUB R8, R3, R3 + CMP R5, R3 + BLT errCorrupt + + // if length > len(dst)-d { etc } + MOVD R10, R3 + SUB R7, R3, R3 + CMP R3, R4 + BGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R14 = len(dst)-d + // - R15 = &dst[d-offset] + MOVD R10, R14 + SUB R7, R14, R14 + MOVD R7, R15 + SUB R5, R15, R15 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMP $16, R4 + BGT slowForwardCopy + CMP $8, R5 + BLT slowForwardCopy + CMP $16, R14 + BLT slowForwardCopy + MOVD 0(R15), R2 + MOVD R2, 0(R7) + MOVD 8(R15), R3 + MOVD R3, 8(R7) + ADD R4, R7, R7 + B loop + +slowForwardCopy: + // !!! 
If the forward copy is longer than 16 bytes, or if offset < 8, we + // can still try 8-byte load stores, provided we can overrun up to 10 extra + // bytes. As above, the overrun will be fixed up by subsequent iterations + // of the outermost loop. + // + // The C++ snappy code calls this technique IncrementalCopyFastPath. Its + // commentary says: + // + // ---- + // + // The main part of this loop is a simple copy of eight bytes at a time + // until we've copied (at least) the requested amount of bytes. However, + // if d and d-offset are less than eight bytes apart (indicating a + // repeating pattern of length < 8), we first need to expand the pattern in + // order to get the correct results. For instance, if the buffer looks like + // this, with the eight-byte <d-offset> and <d> patterns marked as + // intervals: + // + // abxxxxxxxxxxxx + // [------] d-offset + // [------] d + // + // a single eight-byte copy from <d-offset> to <d> will repeat the pattern + // once, after which we can move <d> two bytes without moving <d-offset>: + // + // ababxxxxxxxxxx + // [------] d-offset + // [------] d + // + // and repeat the exercise until the two no longer overlap. + // + // This allows us to do very well in the special case of one single byte + // repeated many times, without taking a big hit for more general cases. + // + // The worst case of extra writing past the end of the match occurs when + // offset == 1 and length == 1; the last copy will read from byte positions + // [0..7] and write to [4..11], whereas it was only supposed to write to + // position 1. Thus, ten excess bytes. + // + // ---- + // + // That "10 byte overrun" worst case is confirmed by Go's + // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy + // and finishSlowForwardCopy algorithm. + // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUB $10, R14, R14 + CMP R14, R4 + BGT verySlowForwardCopy + +makeOffsetAtLeast8: + // !!! As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. + // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R15, is unchanged. + // } + CMP $8, R5 + BGE fixUpSlowForwardCopy + MOVD (R15), R3 + MOVD R3, (R7) + SUB R5, R4, R4 + ADD R5, R7, R7 + ADD R5, R5, R5 + B makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by R7 being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save R7 to R2 so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVD R7, R2 + ADD R4, R7, R7 + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + MOVD $0, R1 + CMP R1, R4 + BLE loop + MOVD (R15), R3 + MOVD R3, (R2) + ADD $8, R15, R15 + ADD $8, R2, R2 + SUB $8, R4, R4 + B finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0.
In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), R3 + MOVB R3, (R7) + ADD $1, R15, R15 + ADD $1, R7, R7 + SUB $1, R4, R4 + CBNZ R4, verySlowForwardCopy + B loop + + // The code above handles copy tags. + // ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMP R10, R7 + BNE errCorrupt + + // return 0 + MOVD $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVD $1, R2 + MOVD R2, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_asm.go similarity index 93% rename from vendor/github.com/klauspost/compress/snappy/decode_amd64.go rename to vendor/github.com/golang/snappy/decode_asm.go index fcd192b849e..7082b349199 100644 --- a/vendor/github.com/klauspost/compress/snappy/decode_amd64.go +++ b/vendor/github.com/golang/snappy/decode_asm.go @@ -5,6 +5,7 @@ // +build !appengine // +build gc // +build !noasm +// +build amd64 arm64 package snappy diff --git a/vendor/github.com/klauspost/compress/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go similarity index 98% rename from vendor/github.com/klauspost/compress/snappy/decode_other.go rename to vendor/github.com/golang/snappy/decode_other.go index 94a96c5d7b8..2f672be5574 100644 --- a/vendor/github.com/klauspost/compress/snappy/decode_other.go +++ b/vendor/github.com/golang/snappy/decode_other.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64 appengine !gc noasm +// +build !amd64,!arm64 appengine !gc noasm package snappy @@ -87,7 +87,7 @@ func decode(dst, src []byte) int { } // Copy from an earlier sub-slice of dst to a later sub-slice. // If no overlap, use the built-in copy: - if offset > length { + if offset >= length { copy(dst[d:d+length], dst[d-offset:]) d += length continue diff --git a/vendor/github.com/klauspost/compress/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go similarity index 98% rename from vendor/github.com/klauspost/compress/snappy/encode.go rename to vendor/github.com/golang/snappy/encode.go index 8d393e904bb..7f23657076c 100644 --- a/vendor/github.com/klauspost/compress/snappy/encode.go +++ b/vendor/github.com/golang/snappy/encode.go @@ -15,6 +15,8 @@ import ( // Otherwise, a newly allocated slice will be returned. // // The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. func Encode(dst, src []byte) []byte { if n := MaxEncodedLen(len(src)); n < 0 { panic(ErrTooLarge) @@ -139,6 +141,8 @@ func NewBufferedWriter(w io.Writer) *Writer { } // Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. 
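The doc comments added to decode.go and encode.go above make the format split explicit: Encode and Decode handle single self-contained blocks, while Writer and Reader speak the framed stream format, so the two pairs cannot be mixed. A small round-trip sketch against the vendored github.com/golang/snappy API (error handling kept minimal for brevity):

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/golang/snappy"
)

func main() {
	payload := []byte("hello, snappy")

	// Block format: one compressed block, no framing or checksums around it.
	block := snappy.Encode(nil, payload)
	back, err := snappy.Decode(nil, block)
	if err != nil {
		panic(err)
	}
	fmt.Printf("block round trip: %q\n", back)

	// Stream format: framed output written by Writer, consumed by Reader.
	var buf bytes.Buffer
	w := snappy.NewBufferedWriter(&buf)
	if _, err := w.Write(payload); err != nil {
		panic(err)
	}
	w.Close() // flush the final frame

	out, err := ioutil.ReadAll(snappy.NewReader(&buf))
	if err != nil {
		panic(err)
	}
	fmt.Printf("stream round trip: %q\n", out)
}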
type Writer struct { w io.Writer err error diff --git a/vendor/github.com/klauspost/compress/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s similarity index 100% rename from vendor/github.com/klauspost/compress/snappy/encode_amd64.s rename to vendor/github.com/golang/snappy/encode_amd64.s diff --git a/vendor/github.com/golang/snappy/encode_arm64.s b/vendor/github.com/golang/snappy/encode_arm64.s new file mode 100644 index 00000000000..bf83667d711 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_arm64.s @@ -0,0 +1,722 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - R3 len(lit) +// - R4 n +// - R6 return value +// - R8 &dst[i] +// - R10 &lit[0] +// +// The 32 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $32-56 + MOVD dst_base+0(FP), R8 + MOVD lit_base+24(FP), R10 + MOVD lit_len+32(FP), R3 + MOVD R3, R6 + MOVW R3, R4 + SUBW $1, R4, R4 + + CMPW $60, R4 + BLT oneByte + CMPW $256, R4 + BLT twoBytes + +threeBytes: + MOVD $0xf4, R2 + MOVB R2, 0(R8) + MOVW R4, 1(R8) + ADD $3, R8, R8 + ADD $3, R6, R6 + B memmove + +twoBytes: + MOVD $0xf0, R2 + MOVB R2, 0(R8) + MOVB R4, 1(R8) + ADD $2, R8, R8 + ADD $2, R6, R6 + B memmove + +oneByte: + LSLW $2, R4, R4 + MOVB R4, 0(R8) + ADD $1, R8, R8 + ADD $1, R6, R6 + +memmove: + MOVD R6, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // R8, R10 and R3 as arguments. + MOVD R8, 8(RSP) + MOVD R10, 16(RSP) + MOVD R3, 24(RSP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - R3 length +// - R7 &dst[0] +// - R8 &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVD dst_base+0(FP), R8 + MOVD R8, R7 + MOVD offset+24(FP), R11 + MOVD length+32(FP), R3 + +loop0: + // for length >= 68 { etc } + CMPW $68, R3 + BLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVD $0xfe, R2 + MOVB R2, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUB $64, R3, R3 + B loop0 + +step1: + // if length > 64 { etc } + CMP $64, R3 + BLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVD $0xee, R2 + MOVB R2, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUB $60, R3, R3 + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMP $12, R3 + BGE step3 + CMPW $2048, R11 + BGE step3 + + // Emit the remaining copy, encoded as 2 bytes. 
+ MOVB R11, 1(R8) + LSRW $3, R11, R11 + AND $0xe0, R11, R11 + SUB $4, R3, R3 + LSLW $2, R3 + AND $0xff, R3, R3 + ORRW R3, R11, R11 + ORRW $1, R11, R11 + MOVB R11, 0(R8) + ADD $2, R8, R8 + + // Return the number of bytes written. + SUB R7, R8, R8 + MOVD R8, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUB $1, R3, R3 + AND $0xff, R3, R3 + LSLW $2, R3, R3 + ORRW $2, R3, R3 + MOVB R3, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + + // Return the number of bytes written. + SUB R7, R8, R8 + MOVD R8, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - R6 &src[0] +// - R7 &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVD src_base+0(FP), R6 + MOVD src_len+8(FP), R14 + MOVD i+24(FP), R15 + MOVD j+32(FP), R7 + ADD R6, R14, R14 + ADD R6, R15, R15 + ADD R6, R7, R7 + MOVD R14, R13 + SUB $8, R13, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMP R13, R7 + BHI cmp1 + MOVD (R15), R3 + MOVD (R7), R4 + CMP R4, R3 + BNE bsf + ADD $8, R15, R15 + ADD $8, R7, R7 + B cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. + // RBIT reverses the bit order, then CLZ counts the leading zeros, the + // combination of which finds the least significant bit which is set. + // The arm64 architecture is little-endian, and the shift by 3 converts + // a bit index to a byte index. + EOR R3, R4, R4 + RBIT R4, R4 + CLZ R4, R4 + ADD R4>>3, R7, R7 + + // Convert from &src[ret] to ret. + SUB R6, R7, R7 + MOVD R7, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMP R7, R14 + BLS extendMatchEnd + MOVB (R15), R3 + MOVB (R7), R4 + CMP R4, R3 + BNE extendMatchEnd + ADD $1, R15, R15 + ADD $1, R7, R7 + B cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUB R6, R7, R7 + MOVD R7, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - R3 . . +// - R4 . . +// - R5 64 shift +// - R6 72 &src[0], tableSize +// - R7 80 &src[s] +// - R8 88 &dst[d] +// - R9 96 sLimit +// - R10 . &src[nextEmit] +// - R11 104 prevHash, currHash, nextHash, offset +// - R12 112 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 120 candidate +// - R16 . hash constant, 0x1e35a7bd +// - R17 . &table +// - . 128 table +// +// The second column (64, 72, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. +// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. 
An +// extra 64 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 64 + 64 = 32896. +TEXT ·encodeBlock(SB), 0, $32896-56 + MOVD dst_base+0(FP), R8 + MOVD src_base+24(FP), R7 + MOVD src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVD $24, R5 + MOVD $256, R6 + MOVW $0xa7bd, R16 + MOVKW $(0x1e35<<16), R16 + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + MOVD $16384, R2 + CMP R2, R6 + BGE varTable + CMP R14, R6 + BGE varTable + SUB $1, R5, R5 + LSL $1, R6, R6 + B calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each + // iterations writes 64 bytes, so we can do only tableSize/32 writes + // instead of the 2048 writes that would zero-initialize all of table's + // 32768 bytes. This clear could overrun the first tableSize elements, but + // it won't overrun the allocated stack size. + ADD $128, RSP, R17 + MOVD R17, R4 + + // !!! R6 = &src[tableSize] + ADD R6<<1, R17, R6 + +memclr: + STP.P (ZR, ZR), 64(R4) + STP (ZR, ZR), -48(R4) + STP (ZR, ZR), -32(R4) + STP (ZR, ZR), -16(R4) + CMP R4, R6 + BHI memclr + + // !!! R6 = &src[0] + MOVD R7, R6 + + // sLimit := len(src) - inputMargin + MOVD R14, R9 + SUB $15, R9, R9 + + // !!! Pre-emptively spill R5, R6 and R9 to the stack. Their values don't + // change for the rest of the function. + MOVD R5, 64(RSP) + MOVD R6, 72(RSP) + MOVD R9, 96(RSP) + + // nextEmit := 0 + MOVD R6, R10 + + // s := 1 + ADD $1, R7, R7 + + // nextHash := hash(load32(src, s), shift) + MOVW 0(R7), R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + +outer: + // for { etc } + + // skip := 32 + MOVD $32, R12 + + // nextS := s + MOVD R7, R13 + + // candidate := 0 + MOVD $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVD R13, R7 + + // bytesBetweenHashLookups := skip >> 5 + MOVD R12, R14 + LSR $5, R14, R14 + + // nextS = s + bytesBetweenHashLookups + ADD R14, R13, R13 + + // skip += bytesBetweenHashLookups + ADD R14, R12, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVD R13, R3 + SUB R6, R3, R3 + CMP R9, R3 + BHI emitRemainder + + // candidate = int(table[nextHash]) + MOVHU 0(R17)(R11<<1), R15 + + // table[nextHash] = uint16(s) + MOVD R7, R3 + SUB R6, R3, R3 + + MOVH R3, 0(R17)(R11<<1) + + // nextHash = hash(load32(src, nextS), shift) + MOVW 0(R13), R11 + MULW R16, R11 + LSRW R5, R11, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVW 0(R7), R3 + MOVW (R6)(R15*1), R4 + CMPW R4, R3 + BNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVD R7, R3 + SUB R10, R3, R3 + CMP $16, R3 + BLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. 
+ // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVW R3, R4 + SUBW $1, R4, R4 + + MOVW $60, R2 + CMPW R2, R4 + BLT inlineEmitLiteralOneByte + MOVW $256, R2 + CMPW R2, R4 + BLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVD $0xf4, R1 + MOVB R1, 0(R8) + MOVW R4, 1(R8) + ADD $3, R8, R8 + B inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVD $0xf0, R1 + MOVB R1, 0(R8) + MOVB R4, 1(R8) + ADD $2, R8, R8 + B inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + LSLW $2, R4, R4 + MOVB R4, 0(R8) + ADD $1, R8, R8 + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // R8, R10 and R3 as arguments. + MOVD R8, 8(RSP) + MOVD R10, 16(RSP) + MOVD R3, 24(RSP) + + // Finish the "d +=" part of "d += emitLiteral(etc)". + ADD R3, R8, R8 + MOVD R7, 80(RSP) + MOVD R8, 88(RSP) + MOVD R15, 120(RSP) + CALL runtime·memmove(SB) + MOVD 64(RSP), R5 + MOVD 72(RSP), R6 + MOVD 80(RSP), R7 + MOVD 88(RSP), R8 + MOVD 96(RSP), R9 + MOVD 120(RSP), R15 + ADD $128, RSP, R17 + MOVW $0xa7bd, R16 + MOVKW $(0x1e35<<16), R16 + B inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB R3, R4 + SUBW $1, R4, R4 + AND $0xff, R4, R4 + LSLW $2, R4, R4 + MOVB R4, (R8) + ADD $1, R8, R8 + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + LDP 0(R10), (R0, R1) + STP (R0, R1), 0(R8) + ADD R3, R8, R8 + +inner1: + // for { etc } + + // base := s + MOVD R7, R12 + + // !!! offset := base - candidate + MOVD R12, R11 + SUB R15, R11, R11 + SUB R6, R11, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVD src_len+32(FP), R14 + ADD R6, R14, R14 + + // !!! R13 = &src[len(src) - 8] + MOVD R14, R13 + SUB $8, R13, R13 + + // !!! R15 = &src[candidate + 4] + ADD $4, R15, R15 + ADD R6, R15, R15 + + // !!! s += 4 + ADD $4, R7, R7 + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMP R13, R7 + BHI inlineExtendMatchCmp1 + MOVD (R15), R3 + MOVD (R7), R4 + CMP R4, R3 + BNE inlineExtendMatchBSF + ADD $8, R15, R15 + ADD $8, R7, R7 + B inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. + // RBIT reverses the bit order, then CLZ counts the leading zeros, the + // combination of which finds the least significant bit which is set. + // The arm64 architecture is little-endian, and the shift by 3 converts + // a bit index to a byte index. + EOR R3, R4, R4 + RBIT R4, R4 + CLZ R4, R4 + ADD R4>>3, R7, R7 + B inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. 
+ CMP R7, R14 + BLS inlineExtendMatchEnd + MOVB (R15), R3 + MOVB (R7), R4 + CMP R4, R3 + BNE inlineExtendMatchEnd + ADD $1, R15, R15 + ADD $1, R7, R7 + B inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. + // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVD R7, R3 + SUB R12, R3, R3 + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + MOVW $68, R2 + CMPW R2, R3 + BLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVD $0xfe, R1 + MOVB R1, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUBW $64, R3, R3 + B inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + MOVW $64, R2 + CMPW R2, R3 + BLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVD $0xee, R1 + MOVB R1, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUBW $60, R3, R3 + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + MOVW $12, R2 + CMPW R2, R3 + BGE inlineEmitCopyStep3 + MOVW $2048, R2 + CMPW R2, R11 + BGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(R8) + LSRW $8, R11, R11 + LSLW $5, R11, R11 + SUBW $4, R3, R3 + AND $0xff, R3, R3 + LSLW $2, R3, R3 + ORRW R3, R11, R11 + ORRW $1, R11, R11 + MOVB R11, 0(R8) + ADD $2, R8, R8 + B inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBW $1, R3, R3 + LSLW $2, R3, R3 + ORRW $2, R3, R3 + MOVB R3, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVD R7, R10 + + // if s >= sLimit { goto emitRemainder } + MOVD R7, R3 + SUB R6, R3, R3 + CMP R3, R9 + BLS emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. + + // x := load64(src, s-1) + MOVD -1(R7), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVW R14, R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + + // table[prevHash] = uint16(s-1) + MOVD R7, R3 + SUB R6, R3, R3 + SUB $1, R3, R3 + + MOVHU R3, 0(R17)(R11<<1) + + // currHash := hash(uint32(x>>8), shift) + LSR $8, R14, R14 + MOVW R14, R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + + // candidate = int(table[currHash]) + MOVHU 0(R17)(R11<<1), R15 + + // table[currHash] = uint16(s) + ADD $1, R3, R3 + MOVHU R3, 0(R17)(R11<<1) + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVW (R6)(R15*1), R4 + CMPW R4, R14 + BEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + LSR $8, R14, R14 + MOVW R14, R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + + // s++ + ADD $1, R7, R7 + + // break out of the inner1 for loop, i.e. continue the outer loop. + B outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVD src_len+32(FP), R3 + ADD R6, R3, R3 + CMP R3, R10 + BEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVD R8, 8(RSP) + MOVD $0, 16(RSP) // Unnecessary, as the callee ignores it, but conservative. + MOVD $0, 24(RSP) // Unnecessary, as the callee ignores it, but conservative. + MOVD R10, 32(RSP) + SUB R10, R3, R3 + MOVD R3, 40(RSP) + MOVD R3, 48(RSP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. 
+ MOVD R8, 88(RSP) + CALL ·emitLiteral(SB) + MOVD 88(RSP), R8 + + // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVD 56(RSP), R1 + ADD R1, R8, R8 + +encodeBlockEnd: + MOVD dst_base+0(FP), R3 + SUB R3, R8, R8 + MOVD R8, d+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_asm.go similarity index 97% rename from vendor/github.com/klauspost/compress/snappy/encode_amd64.go rename to vendor/github.com/golang/snappy/encode_asm.go index 150d91bc8be..107c1e71418 100644 --- a/vendor/github.com/klauspost/compress/snappy/encode_amd64.go +++ b/vendor/github.com/golang/snappy/encode_asm.go @@ -5,6 +5,7 @@ // +build !appengine // +build gc // +build !noasm +// +build amd64 arm64 package snappy diff --git a/vendor/github.com/klauspost/compress/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go similarity index 99% rename from vendor/github.com/klauspost/compress/snappy/encode_other.go rename to vendor/github.com/golang/snappy/encode_other.go index dbcae905e6e..296d7f0beb0 100644 --- a/vendor/github.com/klauspost/compress/snappy/encode_other.go +++ b/vendor/github.com/golang/snappy/encode_other.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64 appengine !gc noasm +// +build !amd64,!arm64 appengine !gc noasm package snappy diff --git a/vendor/github.com/golang/snappy/go.mod b/vendor/github.com/golang/snappy/go.mod new file mode 100644 index 00000000000..f6406bb2c76 --- /dev/null +++ b/vendor/github.com/golang/snappy/go.mod @@ -0,0 +1 @@ +module github.com/golang/snappy diff --git a/vendor/github.com/klauspost/compress/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go similarity index 98% rename from vendor/github.com/klauspost/compress/snappy/snappy.go rename to vendor/github.com/golang/snappy/snappy.go index 74a36689e87..ece692ea461 100644 --- a/vendor/github.com/klauspost/compress/snappy/snappy.go +++ b/vendor/github.com/golang/snappy/snappy.go @@ -17,7 +17,7 @@ // // The canonical, C++ implementation is at https://github.com/google/snappy and // it only implements the block format. -package snappy +package snappy // import "github.com/golang/snappy" import ( "hash/crc32" diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go index 6ff062f9bb4..b83acdbc6d3 100644 --- a/vendor/github.com/google/btree/btree.go +++ b/vendor/github.com/google/btree/btree.go @@ -796,7 +796,7 @@ func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { } // DescendGreaterThan calls the iterator for every value in the tree within -// the range (pivot, last], until iterator returns false. +// the range [last, pivot), until iterator returns false. func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { if t.root == nil { return diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go b/vendor/github.com/google/btree/go.mod similarity index 57% rename from vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go rename to vendor/github.com/google/btree/go.mod index 6609e2877c0..fe4d5ca17b3 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go +++ b/vendor/github.com/google/btree/go.mod @@ -1,9 +1,10 @@ -// Copyright 2019 The Prometheus Authors +// Copyright 2014 Google Inc. 
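The btree.go doc fix above pins down DescendGreaterThan's order of traversal: items greater than the pivot are visited from the largest key downward, stopping before the pivot itself. A small usage sketch against the vendored github.com/google/btree API:

package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	tr := btree.New(2)
	for i := 1; i <= 5; i++ {
		tr.ReplaceOrInsert(btree.Int(i))
	}

	// Prints 5, 4, 3: everything greater than the pivot (2), walked descending.
	tr.DescendGreaterThan(btree.Int(2), func(item btree.Item) bool {
		fmt.Println(item)
		return true // keep iterating
	})
}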
+// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -11,12 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !go1.12 - -package prometheus +module github.com/google/btree -// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before -// 1.12. Remove this whole file once the minimum supported Go version is 1.12. -func readBuildInfo() (path, version, sum string) { - return "unknown", "unknown", "unknown" -} +go 1.12 diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go index a6c070cfcd9..104bb30538b 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_compare.go +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -79,7 +79,7 @@ func (opts formatOptions) verbosity() uint { } } -const maxVerbosityPreset = 3 +const maxVerbosityPreset = 6 // verbosityPreset modifies the verbosity settings given an index // between 0 and maxVerbosityPreset, inclusive. @@ -100,7 +100,7 @@ func verbosityPreset(opts formatOptions, i int) formatOptions { func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) { if opts.DiffMode == diffIdentical { opts = opts.WithVerbosity(1) - } else { + } else if opts.verbosity() < 3 { opts = opts.WithVerbosity(3) } diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index da04caf1649..168f92f3c12 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -26,8 +26,6 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { return false // No differences detected case !v.ValueX.IsValid() || !v.ValueY.IsValid(): return false // Both values must be valid - case v.Type.Kind() == reflect.Slice && (v.ValueX.Len() == 0 || v.ValueY.Len() == 0): - return false // Both slice values have to be non-empty case v.NumIgnored > 0: return false // Some ignore option was used case v.NumTransformed > 0: @@ -45,7 +43,16 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { return false } - switch t := v.Type; t.Kind() { + // Check whether this is an interface with the same concrete types. + t := v.Type + vx, vy := v.ValueX, v.ValueY + if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() { + vx, vy = vx.Elem(), vy.Elem() + t = vx.Type() + } + + // Check whether we provide specialized diffing for this type. + switch t.Kind() { case reflect.String: case reflect.Array, reflect.Slice: // Only slices of primitive types have specialized handling. @@ -57,6 +64,11 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { return false } + // Both slice values have to be non-empty. + if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) { + return false + } + // If a sufficient number of elements already differ, // use specialized formatting even if length requirement is not met. 
if v.NumDiff > v.NumSame { @@ -68,7 +80,7 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { // Use specialized string diffing for longer slices or strings. const minLength = 64 - return v.ValueX.Len() >= minLength && v.ValueY.Len() >= minLength + return vx.Len() >= minLength && vy.Len() >= minLength } // FormatDiffSlice prints a diff for the slices (or strings) represented by v. @@ -77,6 +89,11 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { assert(opts.DiffMode == diffUnknown) t, vx, vy := v.Type, v.ValueX, v.ValueY + if t.Kind() == reflect.Interface { + vx, vy = vx.Elem(), vy.Elem() + t = vx.Type() + opts = opts.WithTypeMode(emitType) + } // Auto-detect the type of the data. var isLinedText, isText, isBinary bool diff --git a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md index 11a56490cf7..ea4ca7539e5 100644 --- a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md +++ b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md @@ -1,4 +1,33 @@ -## 0.16.0 (Unreleased) +## 0.18.0 (Unreleased) + +## 0.17.0 (April 9, 2021) + +IMPROVEMENTS + +* `networking/v2/extensions/quotas.QuotaDetail.Reserved` can handle both `int` and `string` values [GH-2126](https://github.com/gophercloud/gophercloud/pull/2126) +* Added `blockstorage/v3/volumetypes.ListExtraSpecs` [GH-2123](https://github.com/gophercloud/gophercloud/pull/2123) +* Added `blockstorage/v3/volumetypes.GetExtraSpec` [GH-2123](https://github.com/gophercloud/gophercloud/pull/2123) +* Added `blockstorage/v3/volumetypes.CreateExtraSpecs` [GH-2123](https://github.com/gophercloud/gophercloud/pull/2123) +* Added `blockstorage/v3/volumetypes.UpdateExtraSpec` [GH-2123](https://github.com/gophercloud/gophercloud/pull/2123) +* Added `blockstorage/v3/volumetypes.DeleteExtraSpec` [GH-2123](https://github.com/gophercloud/gophercloud/pull/2123) +* Added `identity/v3/roles.ListAssignmentOpts.IncludeNames` [GH-2133](https://github.com/gophercloud/gophercloud/pull/2133) +* Added `identity/v3/roles.AssignedRoles.Name` [GH-2133](https://github.com/gophercloud/gophercloud/pull/2133) +* Added `identity/v3/roles.Domain.Name` [GH-2133](https://github.com/gophercloud/gophercloud/pull/2133) +* Added `identity/v3/roles.Project.Name` [GH-2133](https://github.com/gophercloud/gophercloud/pull/2133) +* Added `identity/v3/roles.User.Name` [GH-2133](https://github.com/gophercloud/gophercloud/pull/2133) +* Added `identity/v3/roles.Group.Name` [GH-2133](https://github.com/gophercloud/gophercloud/pull/2133) +* Added `blockstorage/extensions/availabilityzones.List` [GH-2135](https://github.com/gophercloud/gophercloud/pull/2135) +* Added `blockstorage/v3/volumetypes.ListAccesses` [GH-2138](https://github.com/gophercloud/gophercloud/pull/2138) +* Added `blockstorage/v3/volumetypes.AddAccess` [GH-2138](https://github.com/gophercloud/gophercloud/pull/2138) +* Added `blockstorage/v3/volumetypes.RemoveAccess` [GH-2138](https://github.com/gophercloud/gophercloud/pull/2138) +* Added `blockstorage/v3/qos.Create` [GH-2140](https://github.com/gophercloud/gophercloud/pull/2140) +* Added `blockstorage/v3/qos.Delete` [GH-2140](https://github.com/gophercloud/gophercloud/pull/2140) + +## 0.16.0 (February 23, 2021) + +UPGRADE NOTES + +* `baremetal/v1/nodes.CleanStep.Interface` has changed from `string` to `StepInterface` [GH-2120](https://github.com/gophercloud/gophercloud/pull/2120) BUG FIXES @@ -21,6 +50,11 @@ 
IMPROVEMENTS * Added `containerinfra/v1/clusters.CreateOpts.MasterLBEnabled` [GH-2102](https://github.com/gophercloud/gophercloud/pull/2102) * Added the ability to define a custom function to handle "Retry-After" (429) responses [GH-2097](https://github.com/gophercloud/gophercloud/pull/2097) * Added `baremetal/v1/nodes.JBOD` constant for the `RAIDLevel` type [GH-2103](https://github.com/gophercloud/gophercloud/pull/2103) +* Added support for Block Storage quotas of volume typed resources [GH-2109](https://github.com/gophercloud/gophercloud/pull/2109) +* Added `blockstorage/extensions/volumeactions.ChangeType` [GH-2113](https://github.com/gophercloud/gophercloud/pull/2113) +* Added `baremetal/v1/nodes.DeployStep` [GH-2120](https://github.com/gophercloud/gophercloud/pull/2120) +* Added `baremetal/v1/nodes.ProvisionStateOpts.DeploySteps` [GH-2120](https://github.com/gophercloud/gophercloud/pull/2120) +* Added `baremetal/v1/nodes.CreateOpts.AutomatedClean` [GH-2122](https://github.com/gophercloud/gophercloud/pull/2122) ## 0.15.0 (December 27, 2020) diff --git a/vendor/github.com/gophercloud/utils/openstack/clientconfig/requests.go b/vendor/github.com/gophercloud/utils/openstack/clientconfig/requests.go index d564d0693fb..17b283b5dee 100644 --- a/vendor/github.com/gophercloud/utils/openstack/clientconfig/requests.go +++ b/vendor/github.com/gophercloud/utils/openstack/clientconfig/requests.go @@ -817,11 +817,9 @@ func NewServiceClient(service string, opts *ClientOpts) (*gophercloud.ServiceCli pClient.HTTPClient = *opts.HTTPClient } else { // Otherwise create a new HTTP client with the generated TLS config. - pClient.HTTPClient = http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsConfig, - }, - } + transport := http.DefaultTransport.(*http.Transport).Clone() + transport.TLSClientConfig = tlsConfig + pClient.HTTPClient = http.Client{Transport: transport} } err = openstack.Authenticate(pClient, *ao) diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE new file mode 100644 index 00000000000..c33dcc7c928 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. 
“Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. 
You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md new file mode 100644 index 00000000000..444df08f8e7 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/README.md @@ -0,0 +1,89 @@ +# errwrap + +`errwrap` is a package for Go that formalizes the pattern of wrapping errors +and checking if an error contains another error. + +There is a common pattern in Go of taking a returned `error` value and +then wrapping it (such as with `fmt.Errorf`) before returning it. The problem +with this pattern is that you completely lose the original `error` structure. + +Arguably the _correct_ approach is that you should make a custom structure +implementing the `error` interface, and have the original error as a field +on that structure, such [as this example](http://golang.org/pkg/os/#PathError). +This is a good approach, but you have to know the entire chain of possible +rewrapping that happens, when you might just care about one. + +`errwrap` formalizes this pattern (it doesn't matter what approach you use +above) by giving a single interface for wrapping errors, checking if a specific +error is wrapped, and extracting that error. + +## Installation and Docs + +Install using `go get github.com/hashicorp/errwrap`. + +Full documentation is available at +http://godoc.org/github.com/hashicorp/errwrap + +## Usage + +#### Basic Usage + +Below is a very basic example of its usage: + +```go +// A function that always returns an error, but wraps it, like a real +// function might. +func tryOpen() error { + _, err := os.Open("/i/dont/exist") + if err != nil { + return errwrap.Wrapf("Doesn't exist: {{err}}", err) + } + + return nil +} + +func main() { + err := tryOpen() + + // We can use the Contains helpers to check if an error contains + // another error. It is safe to do this with a nil error, or with + // an error that doesn't even use the errwrap package. + if errwrap.Contains(err, "does not exist") { + // Do something + } + if errwrap.ContainsType(err, new(os.PathError)) { + // Do something + } + + // Or we can use the associated `Get` functions to just extract + // a specific error. This would return nil if that specific error doesn't + // exist. + perr := errwrap.GetType(err, new(os.PathError)) +} +``` + +#### Custom Types + +If you're already making custom types that properly wrap errors, then +you can get all the functionality of `errwraps.Contains` and such by +implementing the `Wrapper` interface with just one function. Example: + +```go +type AppError { + Code ErrorCode + Err error +} + +func (e *AppError) WrappedErrors() []error { + return []error{e.Err} +} +``` + +Now this works: + +```go +err := &AppError{Err: fmt.Errorf("an error")} +if errwrap.ContainsType(err, fmt.Errorf("")) { + // This will work! +} +``` diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go new file mode 100644 index 00000000000..44e368e5692 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/errwrap.go @@ -0,0 +1,178 @@ +// Package errwrap implements methods to formalize error wrapping in Go. +// +// All of the top-level functions that take an `error` are built to be able +// to take any error, not just wrapped errors. 
This allows you to use errwrap +// without having to type-check and type-cast everywhere. +package errwrap + +import ( + "errors" + "reflect" + "strings" +) + +// WalkFunc is the callback called for Walk. +type WalkFunc func(error) + +// Wrapper is an interface that can be implemented by custom types to +// have all the Contains, Get, etc. functions in errwrap work. +// +// When Walk reaches a Wrapper, it will call the callback for every +// wrapped error in addition to the wrapper itself. Since all the top-level +// functions in errwrap use Walk, this means that all those functions work +// with your custom type. +type Wrapper interface { + WrappedErrors() []error +} + +// Wrap defines that outer wraps inner, returning an error type that +// can be cleanly used with the other methods in this package, such as +// Contains, GetAll, etc. +// +// This function won't modify the error message at all (the outer message +// will be used). +func Wrap(outer, inner error) error { + return &wrappedError{ + Outer: outer, + Inner: inner, + } +} + +// Wrapf wraps an error with a formatting message. This is similar to using +// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap +// errors, you should replace it with this. +// +// format is the format of the error message. The string '{{err}}' will +// be replaced with the original error message. +// +// Deprecated: Use fmt.Errorf() +func Wrapf(format string, err error) error { + outerMsg := "" + if err != nil { + outerMsg = err.Error() + } + + outer := errors.New(strings.Replace( + format, "{{err}}", outerMsg, -1)) + + return Wrap(outer, err) +} + +// Contains checks if the given error contains an error with the +// message msg. If err is not a wrapped error, this will always return +// false unless the error itself happens to match this msg. +func Contains(err error, msg string) bool { + return len(GetAll(err, msg)) > 0 +} + +// ContainsType checks if the given error contains an error with +// the same concrete type as v. If err is not a wrapped error, this will +// check the err itself. +func ContainsType(err error, v interface{}) bool { + return len(GetAllType(err, v)) > 0 +} + +// Get is the same as GetAll but returns the deepest matching error. +func Get(err error, msg string) error { + es := GetAll(err, msg) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetType is the same as GetAllType but returns the deepest matching error. +func GetType(err error, v interface{}) error { + es := GetAllType(err, v) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetAll gets all the errors that might be wrapped in err with the +// given message. The order of the errors is such that the outermost +// matching error (the most recent wrap) is index zero, and so on. +func GetAll(err error, msg string) []error { + var result []error + + Walk(err, func(err error) { + if err.Error() == msg { + result = append(result, err) + } + }) + + return result +} + +// GetAllType gets all the errors that are the same type as v. +// +// The order of the return value is the same as described in GetAll. +func GetAllType(err error, v interface{}) []error { + var result []error + + var search string + if v != nil { + search = reflect.TypeOf(v).String() + } + Walk(err, func(err error) { + var needle string + if err != nil { + needle = reflect.TypeOf(err).String() + } + + if needle == search { + result = append(result, err) + } + }) + + return result +} + +// Walk walks all the wrapped errors in err and calls the callback. 
If +// err isn't a wrapped error, this will be called once for err. If err +// is a wrapped error, the callback will be called for both the wrapper +// that implements error as well as the wrapped error itself. +func Walk(err error, cb WalkFunc) { + if err == nil { + return + } + + switch e := err.(type) { + case *wrappedError: + cb(e.Outer) + Walk(e.Inner, cb) + case Wrapper: + cb(err) + + for _, err := range e.WrappedErrors() { + Walk(err, cb) + } + case interface{ Unwrap() error }: + cb(err) + Walk(e.Unwrap(), cb) + default: + cb(err) + } +} + +// wrappedError is an implementation of error that has both the +// outer and inner errors. +type wrappedError struct { + Outer error + Inner error +} + +func (w *wrappedError) Error() string { + return w.Outer.Error() +} + +func (w *wrappedError) WrappedErrors() []error { + return []error{w.Outer, w.Inner} +} + +func (w *wrappedError) Unwrap() error { + return w.Inner +} diff --git a/vendor/github.com/hashicorp/errwrap/go.mod b/vendor/github.com/hashicorp/errwrap/go.mod new file mode 100644 index 00000000000..c9b84022cf7 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/errwrap diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE new file mode 100644 index 00000000000..82b4de97c7e --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
“Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-multierror/Makefile b/vendor/github.com/hashicorp/go-multierror/Makefile new file mode 100644 index 00000000000..b97cd6ed02b --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/Makefile @@ -0,0 +1,31 @@ +TEST?=./... + +default: test + +# test runs the test suite and vets the code. +test: generate + @echo "==> Running tests..." + @go list $(TEST) \ + | grep -v "/vendor/" \ + | xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS} + +# testrace runs the race checker +testrace: generate + @echo "==> Running tests (race)..." + @go list $(TEST) \ + | grep -v "/vendor/" \ + | xargs -n1 go test -timeout=60s -race ${TESTARGS} + +# updatedeps installs all the dependencies needed to run and build. +updatedeps: + @sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'" + +# generate runs `go generate` to build the dynamically generated source files. +generate: + @echo "==> Generating..." + @find . -type f -name '.DS_Store' -delete + @go list ./... \ + | grep -v "/vendor/" \ + | xargs -n1 go generate + +.PHONY: default test testrace updatedeps generate diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md new file mode 100644 index 00000000000..71dd308ed81 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/README.md @@ -0,0 +1,150 @@ +# go-multierror + +[![CircleCI](https://img.shields.io/circleci/build/github/hashicorp/go-multierror/master)](https://circleci.com/gh/hashicorp/go-multierror) +[![Go Reference](https://pkg.go.dev/badge/github.com/hashicorp/go-multierror.svg)](https://pkg.go.dev/github.com/hashicorp/go-multierror) +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/hashicorp/go-multierror) + +[circleci]: https://app.circleci.com/pipelines/github/hashicorp/go-multierror +[godocs]: https://pkg.go.dev/github.com/hashicorp/go-multierror + +`go-multierror` is a package for Go that provides a mechanism for +representing a list of `error` values as a single `error`. + +This allows a function in Go to return an `error` that might actually +be a list of errors. If the caller knows this, they can unwrap the +list and access the errors. If the caller doesn't know, the error +formats to a nice human-readable format. + +`go-multierror` is fully compatible with the Go standard library +[errors](https://golang.org/pkg/errors/) package, including the +functions `As`, `Is`, and `Unwrap`. This provides a standardized approach +for introspecting on error values. + +## Installation and Docs + +Install using `go get github.com/hashicorp/go-multierror`. + +Full documentation is available at +https://pkg.go.dev/github.com/hashicorp/go-multierror + +### Requires go version 1.13 or newer + +`go-multierror` requires go version 1.13 or newer. Go 1.13 introduced +[error wrapping](https://golang.org/doc/go1.13#error_wrapping), which +this library takes advantage of. + +If you need to use an earlier version of go, you can use the +[v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0) +tag, which doesn't rely on features in go 1.13. 
+ +If you see compile errors that look like the below, it's likely that +you're on an older version of go: + +``` +/go/src/github.com/hashicorp/go-multierror/multierror.go:112:9: undefined: errors.As +/go/src/github.com/hashicorp/go-multierror/multierror.go:117:9: undefined: errors.Is +``` + +## Usage + +go-multierror is easy to use and purposely built to be unobtrusive in +existing Go applications/libraries that may not be aware of it. + +**Building a list of errors** + +The `Append` function is used to create a list of errors. This function +behaves a lot like the Go built-in `append` function: it doesn't matter +if the first argument is nil, a `multierror.Error`, or any other `error`, +the function behaves as you would expect. + +```go +var result error + +if err := step1(); err != nil { + result = multierror.Append(result, err) +} +if err := step2(); err != nil { + result = multierror.Append(result, err) +} + +return result +``` + +**Customizing the formatting of the errors** + +By specifying a custom `ErrorFormat`, you can customize the format +of the `Error() string` function: + +```go +var result *multierror.Error + +// ... accumulate errors here, maybe using Append + +if result != nil { + result.ErrorFormat = func([]error) string { + return "errors!" + } +} +``` + +**Accessing the list of errors** + +`multierror.Error` implements `error` so if the caller doesn't know about +multierror, it will work just fine. But if you're aware a multierror might +be returned, you can use type switches to access the list of errors: + +```go +if err := something(); err != nil { + if merr, ok := err.(*multierror.Error); ok { + // Use merr.Errors + } +} +``` + +You can also use the standard [`errors.Unwrap`](https://golang.org/pkg/errors/#Unwrap) +function. This will continue to unwrap into subsequent errors until none exist. + +**Extracting an error** + +The standard library [`errors.As`](https://golang.org/pkg/errors/#As) +function can be used directly with a multierror to extract a specific error: + +```go +// Assume err is a multierror value +err := somefunc() + +// We want to know if "err" has a "RichErrorType" in it and extract it. +var errRich RichErrorType +if errors.As(err, &errRich) { + // It has it, and now errRich is populated. +} +``` + +**Checking for an exact error value** + +Some errors are returned as exact errors such as the [`ErrNotExist`](https://golang.org/pkg/os/#pkg-variables) +error in the `os` package. You can check if this error is present by using +the standard [`errors.Is`](https://golang.org/pkg/errors/#Is) function. + +```go +// Assume err is a multierror value +err := somefunc() +if errors.Is(err, os.ErrNotExist) { + // err contains os.ErrNotExist +} +``` + +**Returning a multierror only if there are errors** + +If you build a `multierror.Error`, you can use the `ErrorOrNil` function +to return an `error` implementation only if there are errors to return: + +```go +var result *multierror.Error + +// ... accumulate errors here + +// Return the `error` only if errors were added to the multierror, otherwise +// return nil since there are no errors. 
+return result.ErrorOrNil() +``` diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go new file mode 100644 index 00000000000..3e2589bfde0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/append.go @@ -0,0 +1,43 @@ +package multierror + +// Append is a helper function that will append more errors +// onto an Error in order to create a larger multi-error. +// +// If err is not a multierror.Error, then it will be turned into +// one. If any of the errs are multierr.Error, they will be flattened +// one level into err. +// Any nil errors within errs will be ignored. If err is nil, a new +// *Error will be returned. +func Append(err error, errs ...error) *Error { + switch err := err.(type) { + case *Error: + // Typed nils can reach here, so initialize if we are nil + if err == nil { + err = new(Error) + } + + // Go through each error and flatten + for _, e := range errs { + switch e := e.(type) { + case *Error: + if e != nil { + err.Errors = append(err.Errors, e.Errors...) + } + default: + if e != nil { + err.Errors = append(err.Errors, e) + } + } + } + + return err + default: + newErrs := make([]error, 0, len(errs)+1) + if err != nil { + newErrs = append(newErrs, err) + } + newErrs = append(newErrs, errs...) + + return Append(&Error{}, newErrs...) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go new file mode 100644 index 00000000000..aab8e9abec9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/flatten.go @@ -0,0 +1,26 @@ +package multierror + +// Flatten flattens the given error, merging any *Errors together into +// a single *Error. +func Flatten(err error) error { + // If it isn't an *Error, just return the error as-is + if _, ok := err.(*Error); !ok { + return err + } + + // Otherwise, make the result and flatten away! + flatErr := new(Error) + flatten(err, flatErr) + return flatErr +} + +func flatten(err error, flatErr *Error) { + switch err := err.(type) { + case *Error: + for _, e := range err.Errors { + flatten(e, flatErr) + } + default: + flatErr.Errors = append(flatErr.Errors, err) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go new file mode 100644 index 00000000000..47f13c49a67 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/format.go @@ -0,0 +1,27 @@ +package multierror + +import ( + "fmt" + "strings" +) + +// ErrorFormatFunc is a function callback that is called by Error to +// turn the list of errors into a string. +type ErrorFormatFunc func([]error) string + +// ListFormatFunc is a basic formatter that outputs the number of errors +// that occurred along with a bullet point list of the errors. 
+func ListFormatFunc(es []error) string { + if len(es) == 1 { + return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0]) + } + + points := make([]string, len(es)) + for i, err := range es { + points[i] = fmt.Sprintf("* %s", err) + } + + return fmt.Sprintf( + "%d errors occurred:\n\t%s\n\n", + len(es), strings.Join(points, "\n\t")) +} diff --git a/vendor/github.com/hashicorp/go-multierror/go.mod b/vendor/github.com/hashicorp/go-multierror/go.mod new file mode 100644 index 00000000000..141cc4ccb25 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/go.mod @@ -0,0 +1,5 @@ +module github.com/hashicorp/go-multierror + +go 1.13 + +require github.com/hashicorp/errwrap v1.0.0 diff --git a/vendor/github.com/hashicorp/go-multierror/go.sum b/vendor/github.com/hashicorp/go-multierror/go.sum new file mode 100644 index 00000000000..e8238e9ec91 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/go.sum @@ -0,0 +1,2 @@ +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= diff --git a/vendor/github.com/hashicorp/go-multierror/group.go b/vendor/github.com/hashicorp/go-multierror/group.go new file mode 100644 index 00000000000..9c29efb7f87 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/group.go @@ -0,0 +1,38 @@ +package multierror + +import "sync" + +// Group is a collection of goroutines which return errors that need to be +// coalesced. +type Group struct { + mutex sync.Mutex + err *Error + wg sync.WaitGroup +} + +// Go calls the given function in a new goroutine. +// +// If the function returns an error it is added to the group multierror which +// is returned by Wait. +func (g *Group) Go(f func() error) { + g.wg.Add(1) + + go func() { + defer g.wg.Done() + + if err := f(); err != nil { + g.mutex.Lock() + g.err = Append(g.err, err) + g.mutex.Unlock() + } + }() +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the multierror. +func (g *Group) Wait() *Error { + g.wg.Wait() + g.mutex.Lock() + defer g.mutex.Unlock() + return g.err +} diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go new file mode 100644 index 00000000000..f5457432646 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/multierror.go @@ -0,0 +1,121 @@ +package multierror + +import ( + "errors" + "fmt" +) + +// Error is an error type to track multiple errors. This is used to +// accumulate errors in cases and return them as a single "error". +type Error struct { + Errors []error + ErrorFormat ErrorFormatFunc +} + +func (e *Error) Error() string { + fn := e.ErrorFormat + if fn == nil { + fn = ListFormatFunc + } + + return fn(e.Errors) +} + +// ErrorOrNil returns an error interface if this Error represents +// a list of errors, or returns nil if the list of errors is empty. This +// function is useful at the end of accumulation to make sure that the value +// returned represents the existence of errors. +func (e *Error) ErrorOrNil() error { + if e == nil { + return nil + } + if len(e.Errors) == 0 { + return nil + } + + return e +} + +func (e *Error) GoString() string { + return fmt.Sprintf("*%#v", *e) +} + +// WrappedErrors returns the list of errors that this Error is wrapping. It is +// an implementation of the errwrap.Wrapper interface so that multierror.Error +// can be used with that library. +// +// This method is not safe to be called concurrently. 
Unlike accessing the +// Errors field directly, this function also checks if the multierror is nil to +// prevent a null-pointer panic. It satisfies the errwrap.Wrapper interface. +func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } + return e.Errors +} + +// Unwrap returns an error from Error (or nil if there are no errors). +// This error returned will further support Unwrap to get the next error, +// etc. The order will match the order of Errors in the multierror.Error +// at the time of calling. +// +// The resulting error supports errors.As/Is/Unwrap so you can continue +// to use the stdlib errors package to introspect further. +// +// This will perform a shallow copy of the errors slice. Any errors appended +// to this error after calling Unwrap will not be available until a new +// Unwrap is called on the multierror.Error. +func (e *Error) Unwrap() error { + // If we have no errors then we do nothing + if e == nil || len(e.Errors) == 0 { + return nil + } + + // If we have exactly one error, we can just return that directly. + if len(e.Errors) == 1 { + return e.Errors[0] + } + + // Shallow copy the slice + errs := make([]error, len(e.Errors)) + copy(errs, e.Errors) + return chain(errs) +} + +// chain implements the interfaces necessary for errors.Is/As/Unwrap to +// work in a deterministic way with multierror. A chain tracks a list of +// errors while accounting for the current represented error. This lets +// Is/As be meaningful. +// +// Unwrap returns the next error. In the cleanest form, Unwrap would return +// the wrapped error here but we can't do that if we want to properly +// get access to all the errors. Instead, users are recommended to use +// Is/As to get the correct error type out. +// +// Precondition: []error is non-empty (len > 0) +type chain []error + +// Error implements the error interface +func (e chain) Error() string { + return e[0].Error() +} + +// Unwrap implements errors.Unwrap by returning the next error in the +// chain or nil if there are no more errors. +func (e chain) Unwrap() error { + if len(e) == 1 { + return nil + } + + return e[1:] +} + +// As implements errors.As by attempting to map to the current value. +func (e chain) As(target interface{}) bool { + return errors.As(e[0], target) +} + +// Is implements errors.Is by comparing the current value directly. +func (e chain) Is(target error) bool { + return errors.Is(e[0], target) +} diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go new file mode 100644 index 00000000000..5c477abe44f --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/prefix.go @@ -0,0 +1,37 @@ +package multierror + +import ( + "fmt" + + "github.com/hashicorp/errwrap" +) + +// Prefix is a helper function that will prefix some text +// to the given error. If the error is a multierror.Error, then +// it will be prefixed to each wrapped error. +// +// This is useful to use when appending multiple multierrors +// together in order to give better scoping. 
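+//
+// A minimal usage sketch (illustrative only, not part of the upstream source):
+//
+//	var result error
+//	result = multierror.Append(result, errors.New("open failed"))
+//	result = multierror.Prefix(result, "storage:")
+//	// result.Error() now lists "* storage: open failed".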
+func Prefix(err error, prefix string) error { + if err == nil { + return nil + } + + format := fmt.Sprintf("%s {{err}}", prefix) + switch err := err.(type) { + case *Error: + // Typed nils can reach here, so initialize if we are nil + if err == nil { + err = new(Error) + } + + // Wrap each of the errors + for i, e := range err.Errors { + err.Errors[i] = errwrap.Wrapf(format, e) + } + + return err + default: + return errwrap.Wrapf(format, err) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/sort.go b/vendor/github.com/hashicorp/go-multierror/sort.go new file mode 100644 index 00000000000..fecb14e81c5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/sort.go @@ -0,0 +1,16 @@ +package multierror + +// Len implements sort.Interface function for length +func (err Error) Len() int { + return len(err.Errors) +} + +// Swap implements sort.Interface function for swapping elements +func (err Error) Swap(i, j int) { + err.Errors[i], err.Errors[j] = err.Errors[j], err.Errors[i] +} + +// Less implements sort.Interface function for determining order +func (err Error) Less(i, j int) bool { + return err.Errors[i].Error() < err.Errors[j].Error() +} diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml index dad29725f86..d324c43ba4d 100644 --- a/vendor/github.com/imdario/mergo/.travis.yml +++ b/vendor/github.com/imdario/mergo/.travis.yml @@ -1,4 +1,7 @@ language: go +arch: + - amd64 + - ppc64le install: - go get -t - go get golang.org/x/tools/cmd/cover diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md index 876abb500a4..aa8cbd7ce6d 100644 --- a/vendor/github.com/imdario/mergo/README.md +++ b/vendor/github.com/imdario/mergo/README.md @@ -97,7 +97,7 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont - [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) - [jnuthong/item_search](https://github.com/jnuthong/item_search) - [bukalapak/snowboard](https://github.com/bukalapak/snowboard) -- [janoszen/containerssh](https://github.com/janoszen/containerssh) +- [containerssh/containerssh](https://github.com/containerssh/containerssh) ## Install diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index afa84a1e298..8c2a8fcd901 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -95,13 +95,18 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } } } else { - if (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { + if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { dst.Set(src) } } case reflect.Map: if dst.IsNil() && !src.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) + if dst.CanSet() { + dst.Set(reflect.MakeMap(dst.Type())) + } else { + dst = src + return + } } if src.Kind() != reflect.Map { diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index 25dbe3e15f4..40b5802debd 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -440,8 +440,7 @@ func (d *compressor) deflateLazy() { // index and index-1 are already inserted. If there is not enough // lookahead, the last two strings are not inserted into the hash // table. 
- var newIndex int - newIndex = s.index + prevLength - 1 + newIndex := s.index + prevLength - 1 // Calculate missing hashes end := newIndex if end > s.maxInsertIndex { @@ -645,15 +644,15 @@ func (d *compressor) init(w io.Writer, level int) (err error) { d.fill = (*compressor).fillBlock d.step = (*compressor).store case level == ConstantCompression: - d.w.logNewTablePenalty = 4 - d.window = make([]byte, maxStoreBlockSize) + d.w.logNewTablePenalty = 8 + d.window = make([]byte, 32<<10) d.fill = (*compressor).fillBlock d.step = (*compressor).storeHuff case level == DefaultCompression: level = 5 fallthrough case level >= 1 && level <= 6: - d.w.logNewTablePenalty = 6 + d.w.logNewTablePenalty = 8 d.fast = newFastEnc(level) d.window = make([]byte, maxStoreBlockSize) d.fill = (*compressor).fillBlock diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go index 4a73e1bdd30..678f081052a 100644 --- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -6,6 +6,7 @@ package flate import ( + "encoding/binary" "fmt" "math/bits" ) @@ -65,26 +66,15 @@ func load32(b []byte, i int) uint32 { } func load64(b []byte, i int) uint64 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:8] - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + return binary.LittleEndian.Uint64(b[i:]) } func load3232(b []byte, i int32) uint32 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:4] - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + return binary.LittleEndian.Uint32(b[i:]) } func load6432(b []byte, i int32) uint64 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:8] - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + return binary.LittleEndian.Uint64(b[i:]) } func hash(u uint32) uint32 { @@ -225,9 +215,9 @@ func (e *fastGen) Reset() { func matchLen(a, b []byte) int { b = b[:len(a)] var checked int - if len(a) > 4 { + if len(a) >= 4 { // Try 4 bytes first - if diff := load32(a, 0) ^ load32(b, 0); diff != 0 { + if diff := binary.LittleEndian.Uint32(a) ^ binary.LittleEndian.Uint32(b); diff != 0 { return bits.TrailingZeros32(diff) >> 3 } // Switch to 8 byte matching. 
@@ -236,7 +226,7 @@ func matchLen(a, b []byte) int { b = b[4:] for len(a) >= 8 { b = b[:len(a)] - if diff := load64(a, 0) ^ load64(b, 0); diff != 0 { + if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 { return checked + (bits.TrailingZeros64(diff) >> 3) } checked += 8 @@ -247,7 +237,7 @@ func matchLen(a, b []byte) int { b = b[:len(a)] for i := range a { if a[i] != b[i] { - return int(i) + checked + return i + checked } } return len(a) + checked diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index 208d66711df..db54be13989 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -5,6 +5,7 @@ package flate import ( + "encoding/binary" "io" ) @@ -206,7 +207,7 @@ func (w *huffmanBitWriter) write(b []byte) { } func (w *huffmanBitWriter) writeBits(b int32, nb uint16) { - w.bits |= uint64(b) << (w.nbits & reg16SizeMask64) + w.bits |= uint64(b) << w.nbits w.nbits += nb if w.nbits >= 48 { w.writeOutBits() @@ -420,13 +421,11 @@ func (w *huffmanBitWriter) writeOutBits() { w.bits >>= 48 w.nbits -= 48 n := w.nbytes - w.bytes[n] = byte(bits) - w.bytes[n+1] = byte(bits >> 8) - w.bytes[n+2] = byte(bits >> 16) - w.bytes[n+3] = byte(bits >> 24) - w.bytes[n+4] = byte(bits >> 32) - w.bytes[n+5] = byte(bits >> 40) + + // We over-write, but faster... + binary.LittleEndian.PutUint64(w.bytes[n:], bits) n += 6 + if n >= bufferFlushSize { if w.err != nil { n = 0 @@ -435,6 +434,7 @@ func (w *huffmanBitWriter) writeOutBits() { w.write(w.bytes[:n]) n = 0 } + w.nbytes = n } @@ -759,7 +759,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } else { // inlined c := lengths[lengthCode&31] - w.bits |= uint64(c.code) << (w.nbits & reg16SizeMask64) + w.bits |= uint64(c.code) << w.nbits w.nbits += c.len if w.nbits >= 48 { w.writeOutBits() @@ -779,7 +779,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } else { // inlined c := offs[offsetCode&31] - w.bits |= uint64(c.code) << (w.nbits & reg16SizeMask64) + w.bits |= uint64(c.code) << w.nbits w.nbits += c.len if w.nbits >= 48 { w.writeOutBits() @@ -830,8 +830,8 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // Assume header is around 70 bytes: // https://stackoverflow.com/a/25454430 const guessHeaderSizeBits = 70 * 8 - estBits, estExtra := histogramSize(input, w.literalFreq[:], !eof && !sync) - estBits += w.lastHeader + 15 + estBits := histogramSize(input, w.literalFreq[:], !eof && !sync) + estBits += w.lastHeader + len(input)/32 if w.lastHeader == 0 { estBits += guessHeaderSizeBits } @@ -845,9 +845,9 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { return } + reuseSize := 0 if w.lastHeader > 0 { - reuseSize := w.literalEncoding.bitLength(w.literalFreq[:256]) - estBits += estExtra + reuseSize = w.literalEncoding.bitLength(w.literalFreq[:256]) if estBits < reuseSize { // We owe an EOB @@ -859,6 +859,10 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { const numLiterals = endBlockMarker + 1 const numOffsets = 1 if w.lastHeader == 0 { + if !eof && !sync { + // Generate a slightly suboptimal tree that can be used for all. 
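+			// fillHist gives every literal that did not occur a count of one,
+			// so the resulting table can still encode any byte value in later
+			// blocks that reuse this header (see fillHist in huffman_code.go).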
+ fillHist(w.literalFreq[:numLiterals]) + } w.literalFreq[endBlockMarker] = 1 w.literalEncoding.generate(w.literalFreq[:numLiterals], 15) @@ -878,19 +882,14 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { for _, t := range input { // Bitwriting inlined, ~30% speedup c := encoding[t] - w.bits |= uint64(c.code) << ((w.nbits) & reg16SizeMask64) + w.bits |= uint64(c.code) << w.nbits w.nbits += c.len if w.nbits >= 48 { bits := w.bits w.bits >>= 48 w.nbits -= 48 n := w.nbytes - w.bytes[n] = byte(bits) - w.bytes[n+1] = byte(bits >> 8) - w.bytes[n+2] = byte(bits >> 16) - w.bytes[n+3] = byte(bits >> 24) - w.bytes[n+4] = byte(bits >> 32) - w.bytes[n+5] = byte(bits >> 40) + binary.LittleEndian.PutUint64(w.bytes[n:], bits) n += 6 if n >= bufferFlushSize { if w.err != nil { diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go index 4c39a301871..0d3445a1cc8 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_code.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -122,6 +122,16 @@ func (h *huffmanEncoder) bitLength(freq []uint16) int { return total } +func (h *huffmanEncoder) bitLengthRaw(b []byte) int { + var total int + for _, f := range b { + if f != 0 { + total += int(h.codes[f].len) + } + } + return total +} + // Return the number of literals assigned to each bit size in the Huffman encoding // // This method is only called when list.length >= 3 @@ -327,37 +337,40 @@ func atLeastOne(v float32) float32 { return v } +// Unassigned values are assigned '1' in the histogram. +func fillHist(b []uint16) { + for i, v := range b { + if v == 0 { + b[i] = 1 + } + } +} + // histogramSize accumulates a histogram of b in h. // An estimated size in bits is returned. -// Unassigned values are assigned '1' in the histogram. // len(h) must be >= 256, and h's elements must be all zeroes. -func histogramSize(b []byte, h []uint16, fill bool) (int, int) { +func histogramSize(b []byte, h []uint16, fill bool) (bits int) { h = h[:256] for _, t := range b { h[t]++ } - invTotal := 1.0 / float32(len(b)) - shannon := float32(0.0) - var extra float32 + total := len(b) if fill { - oneBits := atLeastOne(-mFastLog2(invTotal)) - for i, v := range h[:] { - if v > 0 { - n := float32(v) - shannon += atLeastOne(-mFastLog2(n*invTotal)) * n - } else { - h[i] = 1 - extra += oneBits + for _, v := range h { + if v == 0 { + total++ } } - } else { - for _, v := range h[:] { - if v > 0 { - n := float32(v) - shannon += atLeastOne(-mFastLog2(n*invTotal)) * n - } + } + + invTotal := 1.0 / float32(total) + shannon := float32(0.0) + for _, v := range h { + if v > 0 { + n := float32(v) + shannon += atLeastOne(-mFastLog2(n*invTotal)) * n } } - return int(shannon + 0.99), int(extra + 0.99) + return int(shannon + 0.99) } diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go index 5b986a1944e..234c4389ab3 100644 --- a/vendor/github.com/klauspost/compress/flate/level2.go +++ b/vendor/github.com/klauspost/compress/flate/level2.go @@ -155,7 +155,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { // Store every second hash in-between, but offset by 1. 
for i := s - l + 2; i < s-5; i += 7 { - x := load6432(src, int32(i)) + x := load6432(src, i) nextHash := hash4u(uint32(x), bTableBits) e.table[nextHash] = tableEntry{offset: e.cur + i} // Skip one diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go index b69237c9b8f..6f341914c67 100644 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -92,7 +92,6 @@ func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTra im := int32((nbBitsOut << 16) - first.deltaNbBits) lu := (im >> nbBitsOut) + first.deltaFindState c.state = c.stateTable[lu] - return } // encode the output symbol provided and write it to the bitstream. @@ -301,7 +300,7 @@ func (s *Scratch) writeCount() error { out[outP+1] = byte(bitStream >> 8) outP += (bitCount + 7) / 8 - if uint16(charnum) > s.symbolLen { + if charnum > s.symbolLen { return errors.New("internal error: charnum > s.symbolLen") } s.Out = out[:outP] @@ -331,7 +330,7 @@ type cTable struct { func (s *Scratch) allocCtable() { tableSize := 1 << s.actualTableLog // get tableSymbol that is big enough. - if cap(s.ct.tableSymbol) < int(tableSize) { + if cap(s.ct.tableSymbol) < tableSize { s.ct.tableSymbol = make([]byte, tableSize) } s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] @@ -565,8 +564,8 @@ func (s *Scratch) normalizeCount2() error { distributed uint32 total = uint32(s.br.remain()) tableLog = s.actualTableLog - lowThreshold = uint32(total >> tableLog) - lowOne = uint32((total * 3) >> (tableLog + 1)) + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) ) for i, cnt := range s.count[:s.symbolLen] { if cnt == 0 { @@ -591,7 +590,7 @@ func (s *Scratch) normalizeCount2() error { if (total / toDistribute) > lowOne { // risk of rounding to zero - lowOne = uint32((total * 3) / (toDistribute * 2)) + lowOne = (total * 3) / (toDistribute * 2) for i, cnt := range s.count[:s.symbolLen] { if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { s.norm[i] = 1 diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go index 413ec3b3cd8..926f5f15356 100644 --- a/vendor/github.com/klauspost/compress/fse/decompress.go +++ b/vendor/github.com/klauspost/compress/fse/decompress.go @@ -172,7 +172,7 @@ type decSymbol struct { // allocDtable will allocate decoding tables if they are not big enough. func (s *Scratch) allocDtable() { tableSize := 1 << s.actualTableLog - if cap(s.decTable) < int(tableSize) { + if cap(s.decTable) < tableSize { s.decTable = make([]decSymbol, tableSize) } s.decTable = s.decTable[:tableSize] @@ -340,7 +340,7 @@ type decoder struct { func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { d.dt = dt d.br = in - d.state = uint16(in.getBits(tableLog)) + d.state = in.getBits(tableLog) } // next returns the next symbol and sets the next state. diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index f9ed5f8306e..0823c928cec 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -403,7 +403,7 @@ func (s *Scratch) buildCTable() error { var startNode = int16(s.symbolLen) nonNullRank := s.symbolLen - 1 - nodeNb := int16(startNode) + nodeNb := startNode huffNode := s.nodes[1 : huffNodesLen+1] // This overlays the slice above, but allows "-1" index lookups. 
@@ -536,7 +536,6 @@ func (s *Scratch) huffSort() { } nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} } - return } func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { @@ -580,7 +579,7 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { // Get pos of last (smallest) symbol per rank { - currentNbBits := uint8(maxNbBits) + currentNbBits := maxNbBits for pos := int(n); pos >= 0; pos-- { if huffNode[pos].nbBits >= currentNbBits { continue diff --git a/vendor/github.com/klauspost/compress/snappy/runbench.cmd b/vendor/github.com/klauspost/compress/snappy/runbench.cmd deleted file mode 100644 index d24eb4b47c3..00000000000 --- a/vendor/github.com/klauspost/compress/snappy/runbench.cmd +++ /dev/null @@ -1,2 +0,0 @@ -del old.txt -go test -bench=. >>old.txt && go test -bench=. >>old.txt && go test -bench=. >>old.txt && benchstat -delta-test=ttest old.txt new.txt diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index c85c40255d5..e1be092f326 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -22,28 +22,44 @@ type blockEnc struct { dictLitEnc *huff0.Scratch wr bitWriter - extraLits int - last bool - + extraLits int output []byte recentOffsets [3]uint32 prevRecentOffsets [3]uint32 + + last bool + lowMem bool } // init should be used once the block has been created. // If called more than once, the effect is the same as calling reset. func (b *blockEnc) init() { - if cap(b.literals) < maxCompressedLiteralSize { - b.literals = make([]byte, 0, maxCompressedLiteralSize) - } - const defSeqs = 200 - b.literals = b.literals[:0] - if cap(b.sequences) < defSeqs { - b.sequences = make([]seq, 0, defSeqs) - } - if cap(b.output) < maxCompressedBlockSize { - b.output = make([]byte, 0, maxCompressedBlockSize) + if b.lowMem { + // 1K literals + if cap(b.literals) < 1<<10 { + b.literals = make([]byte, 0, 1<<10) + } + const defSeqs = 20 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + // 1K + if cap(b.output) < 1<<10 { + b.output = make([]byte, 0, 1<<10) + } + } else { + if cap(b.literals) < maxCompressedBlockSize { + b.literals = make([]byte, 0, maxCompressedBlockSize) + } + const defSeqs = 200 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + if cap(b.output) < maxCompressedBlockSize { + b.output = make([]byte, 0, maxCompressedBlockSize) + } } + if b.coders.mlEnc == nil { b.coders.mlEnc = &fseEncoder{} b.coders.mlPrev = &fseEncoder{} @@ -370,9 +386,9 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error { b.output = bh.appendTo(b.output) b.output = append(b.output, lits[0]) return nil + case nil: default: return err - case nil: } // Compressed... // Now, allow reuse @@ -512,11 +528,6 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if debug { println("Adding literals RLE") } - default: - if debug { - println("Adding literals ERROR:", err) - } - return err case nil: // Compressed litLen... 
if reUsed { @@ -547,6 +558,11 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if debug { println("Adding literals compressed") } + default: + if debug { + println("Adding literals ERROR:", err) + } + return err } // Sequence compression diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go index 87896c5eaa7..69736e8d4bb 100644 --- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -93,7 +93,7 @@ func (h *Header) Decode(in []byte) error { h.HasCheckSum = fhd&(1<<2) != 0 if fhd&(1<<3) != 0 { - return errors.New("Reserved bit set on frame header") + return errors.New("reserved bit set on frame header") } // Read Window_Descriptor @@ -174,7 +174,7 @@ func (h *Header) Decode(in []byte) error { if len(in) < 3 { return nil } - tmp, in := in[:3], in[3:] + tmp := in[:3] bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) h.FirstBlock.Last = bh&1 != 0 blockType := blockType((bh >> 1) & 3) diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index 1d41c25d292..f593e464b66 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -179,8 +179,7 @@ func (d *Decoder) Reset(r io.Reader) error { // If bytes buffer and < 1MB, do sync decoding anyway. if bb, ok := r.(byter); ok && bb.Len() < 1<<20 { - var bb2 byter - bb2 = bb + bb2 := bb if debug { println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) } @@ -237,20 +236,17 @@ func (d *Decoder) drainOutput() { println("current already flushed") return } - for { - select { - case v := <-d.current.output: - if v.d != nil { - if debug { - printf("re-adding decoder %p", v.d) - } - d.decoders <- v.d - } - if v.err == errEndOfStream { - println("current flushed") - d.current.flushed = true - return + for v := range d.current.output { + if v.d != nil { + if debug { + printf("re-adding decoder %p", v.d) } + d.decoders <- v.d + } + if v.err == errEndOfStream { + println("current flushed") + d.current.flushed = true + return } } } diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index 284d384492b..c0fd058c28f 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -6,7 +6,6 @@ package zstd import ( "errors" - "fmt" "runtime" ) @@ -43,7 +42,7 @@ func WithDecoderLowmem(b bool) DOption { func WithDecoderConcurrency(n int) DOption { return func(o *decoderOptions) error { if n <= 0 { - return fmt.Errorf("Concurrency must be at least 1") + return errors.New("concurrency must be at least 1") } o.concurrent = n return nil @@ -61,7 +60,7 @@ func WithDecoderMaxMemory(n uint64) DOption { return errors.New("WithDecoderMaxMemory must be at least 1") } if n > 1<<63 { - return fmt.Errorf("WithDecoderMaxmemory must be less than 1 << 63") + return errors.New("WithDecoderMaxmemory must be less than 1 << 63") } o.maxDecodedSize = n return nil diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index b1b7c6e6a72..60f29864864 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -7,6 +7,10 @@ import ( 
"github.com/klauspost/compress/zstd/internal/xxhash" ) +const ( + dictShardBits = 6 +) + type fastBase struct { // cur is the offset at the start of hist cur int32 @@ -17,6 +21,7 @@ type fastBase struct { tmp [8]byte blk *blockEnc lastDictID uint32 + lowMem bool } // CRC returns the underlying CRC writer. @@ -57,15 +62,10 @@ func (e *fastBase) addBlock(src []byte) int32 { // check if we have space already if len(e.hist)+len(src) > cap(e.hist) { if cap(e.hist) == 0 { - l := e.maxMatchOff * 2 - // Make it at least 1MB. - if l < 1<<20 { - l = 1 << 20 - } - e.hist = make([]byte, 0, l) + e.ensureHist(len(src)) } else { - if cap(e.hist) < int(e.maxMatchOff*2) { - panic("unexpected buffer size") + if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) { + panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff)) } // Move down offset := int32(len(e.hist)) - e.maxMatchOff @@ -79,6 +79,28 @@ func (e *fastBase) addBlock(src []byte) int32 { return s } +// ensureHist will ensure that history can keep at least this many bytes. +func (e *fastBase) ensureHist(n int) { + if cap(e.hist) >= n { + return + } + l := e.maxMatchOff + if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize { + l += maxCompressedBlockSize + } else { + l += e.maxMatchOff + } + // Make it at least 1MB. + if l < 1<<20 && !e.lowMem { + l = 1 << 20 + } + // Make it at least the requested size. + if l < int32(n) { + l = int32(n) + } + e.hist = make([]byte, 0, l) +} + // useBlock will replace the block with the provided one, // but transfer recent offsets from the previous. func (e *fastBase) UseBlock(enc *blockEnc) { @@ -117,7 +139,7 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 { // Reset the encoding table. func (e *fastBase) resetBase(d *dict, singleBlock bool) { if e.blk == nil { - e.blk = &blockEnc{} + e.blk = &blockEnc{lowMem: e.lowMem} e.blk.init() } else { e.blk.reset(nil) @@ -128,14 +150,15 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) { } else { e.crc.Reset() } - if (!singleBlock || d.DictContentSize() > 0) && cap(e.hist) < int(e.maxMatchOff*2)+d.DictContentSize() { - l := e.maxMatchOff*2 + int32(d.DictContentSize()) - // Make it at least 1MB. - if l < 1<<20 { - l = 1 << 20 + if d != nil { + low := e.lowMem + if singleBlock { + e.lowMem = true } - e.hist = make([]byte, 0, l) + e.ensureHist(d.DictContentSize() + maxCompressedBlockSize) + e.lowMem = low } + // We offset current position so everything will be out of reach. // If above reset line, history will be purged. if e.cur < bufferReset { diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index c4baa42c648..fe3625c5f5e 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -112,7 +112,7 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { // Override src src = e.hist sLimit := int32(len(src)) - inputMargin - const kSearchStrength = 12 + const kSearchStrength = 10 // nextEmit is where in src the next emitLiteral should start from. 
nextEmit := s @@ -186,9 +186,11 @@ encodeLoop: best = bestOf(best, matchAt(s-offset1+1, s+1, uint32(cv>>8), 1)) best = bestOf(best, matchAt(s-offset2+1, s+1, uint32(cv>>8), 2)) best = bestOf(best, matchAt(s-offset3+1, s+1, uint32(cv>>8), 3)) - best = bestOf(best, matchAt(s-offset1+3, s+3, uint32(cv>>24), 1)) - best = bestOf(best, matchAt(s-offset2+3, s+3, uint32(cv>>24), 2)) - best = bestOf(best, matchAt(s-offset3+3, s+3, uint32(cv>>24), 3)) + if best.length > 0 { + best = bestOf(best, matchAt(s-offset1+3, s+3, uint32(cv>>24), 1)) + best = bestOf(best, matchAt(s-offset2+3, s+3, uint32(cv>>24), 2)) + best = bestOf(best, matchAt(s-offset3+3, s+3, uint32(cv>>24), 3)) + } } // Load next and check... e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} @@ -405,6 +407,7 @@ encodeLoop: // Most notable difference is that src will not be copied for history and // we do not need to check for max match length. func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) e.Encode(blk, src) } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 94a5343d00e..c2ce4a2bac5 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -16,6 +16,12 @@ const ( // This greatly depends on the type of input. betterShortTableBits = 13 // Bits used in the short match table betterShortTableSize = 1 << betterShortTableBits // Size of the table + + betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table + betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard + + betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table + betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard ) type prevEntry struct { @@ -31,10 +37,17 @@ type prevEntry struct { // and that it is longer (lazy matching). type betterFastEncoder struct { fastBase - table [betterShortTableSize]tableEntry - longTable [betterLongTableSize]prevEntry - dictTable []tableEntry - dictLongTable []prevEntry + table [betterShortTableSize]tableEntry + longTable [betterLongTableSize]prevEntry +} + +type betterFastEncoderDict struct { + betterFastEncoder + dictTable []tableEntry + dictLongTable []prevEntry + shortTableShardDirty [betterShortTableShardCnt]bool + longTableShardDirty [betterLongTableShardCnt]bool + allDirty bool } // Encode improves compression... @@ -516,11 +529,511 @@ encodeLoop: // Most notable difference is that src will not be copied for history and // we do not need to check for max match length. func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) e.Encode(blk, src) } +// Encode improves compression... +func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + e.allDirty = true + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.allDirty = true + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashS := hash5(cv, betterShortTableBits) + nextHashL := hash8(cv, betterLongTableBits) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += lenght + repOff + + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + // Index skipped... 
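+				// Descriptive note: hash the positions the repeat match skipped over
+				// into both tables so later searches can still find them (behavior
+				// inferred from the loop below).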
+ for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hash8(cv0, betterLongTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hash5(cv1, betterShortTableBits) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + index0 := s + repOff2 + s += lenght + repOff2 + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hash8(cv0, betterLongTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hash5(cv1, betterShortTableBits) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. 
+ matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hash8(cv, betterLongTableBits) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hash8(cv0, betterLongTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hash5(cv1, betterShortTableBits) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash5(cv, betterShortTableBits) + nextHashL := hash8(cv, betterLongTableBits) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + // ResetDict will reset and set a dictionary if not nil func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("betterFastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { e.resetBase(d, singleBlock) if d == nil { return @@ -557,6 +1070,7 @@ func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { } } e.lastDictID = d.id + e.allDirty = true } // Init or copy dict table @@ -585,11 +1099,72 @@ func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { } } e.lastDictID = d.id + e.allDirty = true } + // Reset table to initial state - copy(e.longTable[:], e.dictLongTable) + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterShortTableShardCnt + const shardSize = betterShortTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.table[:], e.dictTable) + for i := range e.shortTableShardDirty { + e.shortTableShardDirty[i] = false + } + } else { + for i := range e.shortTableShardDirty { + if !e.shortTableShardDirty[i] { + continue + } + copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + e.shortTableShardDirty[i] = false + } + } + } + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterLongTableShardCnt + const shardSize = betterLongTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.longTable[:], e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + } else { + for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize]) + e.longTableShardDirty[i] = false + } + } + } e.cur = e.maxMatchOff - // Reset table to initial state - copy(e.table[:], e.dictTable) + e.allDirty = false +} + +func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/betterLongTableShardSize] = true +} + +func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) { + e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index 19eebf66e50..8629d43d868 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -11,6 +11,9 @@ const ( dFastLongTableSize = 1 << dFastLongTableBits // Size of the table dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. 
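+	// The long table is split into 1<<dictShardBits shards; the dictionary
+	// encoder marks shards it writes as dirty and, on Reset, copies back only
+	// those shards from dictLongTable rather than the whole table (descriptive
+	// note summarizing the code added below).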
+ dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table + dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard + dFastShortTableBits = tableBits // Bits used in the short match table dFastShortTableSize = 1 << dFastShortTableBits // Size of the table dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. @@ -18,8 +21,14 @@ const ( type doubleFastEncoder struct { fastEncoder - longTable [dFastLongTableSize]tableEntry - dictLongTable []tableEntry + longTable [dFastLongTableSize]tableEntry +} + +type doubleFastEncoderDict struct { + fastEncoderDict + longTable [dFastLongTableSize]tableEntry + dictLongTable []tableEntry + longTableShardDirty [dLongTableShardCnt]bool } // Encode mimmics functionality in zstd_dfast.c @@ -678,9 +687,379 @@ encodeLoop: } } +// Encode will encode the content, with a dictionary if initialized for it. +func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) 
+ s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashS := hash5(cv, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += lenght + repOff + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hash8(cv, dFastLongTableBits) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + e.markLongShardDirty(nextHashL) + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
+ t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + longHash1 := hash8(cv0, dFastLongTableBits) + longHash2 := hash8(cv0, dFastLongTableBits) + e.longTable[longHash1] = te0 + e.longTable[longHash2] = te1 + e.markLongShardDirty(longHash1) + e.markLongShardDirty(longHash2) + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + hashVal1 := hash5(cv0, dFastShortTableBits) + hashVal2 := hash5(cv1, dFastShortTableBits) + e.table[hashVal1] = te0 + e.markShardDirty(hashVal1) + e.table[hashVal2] = te1 + e.markShardDirty(hashVal2) + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash5(cv, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. 
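Reviewer note (an illustration added for this vendor bump, not part of the upstream code): wherever these Encode bodies emit a sequence, a repeat match is stored as seq.offset = 1 while an ordinary match stores its distance plus 3. In the zstd sequence format, offset values 1 through 3 refer to the recently used offsets, so a real distance is shifted up past those codes. The standalone helper below restates just that mapping.

```go
package main

import "fmt"

// encodedOffset mirrors the convention visible in the hunks above: sequence
// offset values 1..3 select a recent ("repeat") offset, so a literal match
// distance is stored shifted up by 3. Only the two cases these encoders emit
// are covered; this is an illustration, not the package's API.
func encodedOffset(matchDist uint32, repeat0 bool) uint32 {
    if repeat0 {
        return 1 // "rep 0": reuse a recent offset
    }
    return matchDist + 3 // real distance, shifted past the repeat codes
}

func main() {
    fmt.Println(encodedOffset(0, true))     // 1
    fmt.Println(encodedOffset(1234, false)) // 1237
}
```

The offset1/offset2 swap that follows the repeat check keeps the most recently used distance in offset1 for exactly this repeat path.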
+ offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // If we encoded more than 64K mark all dirty. + if len(src) > 64<<10 { + e.markAllShardsDirty() + } +} + // ResetDict will reset and set a dictionary if not nil func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { e.fastEncoder.Reset(d, singleBlock) + if d != nil { + panic("doubleFastEncoder: Reset with dict not supported") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { + allDirty := e.allDirty + e.fastEncoderDict.Reset(d, singleBlock) if d == nil { return } @@ -706,8 +1085,37 @@ func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { } } e.lastDictID = d.id + e.allDirty = true } // Reset table to initial state e.cur = e.maxMatchOff - copy(e.longTable[:], e.dictLongTable) + + dirtyShardCnt := 0 + if !allDirty { + for i := range e.longTableShardDirty { + if e.longTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { + copy(e.longTable[:], e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + return + } + for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) + e.longTableShardDirty[i] = false + } +} + +func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/dLongTableShardSize] = true } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index 0b301df4390..ba4a17e1064 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -11,9 +11,11 @@ import ( ) const ( - tableBits = 15 // Bits used in the table - tableSize = 1 << tableBits // Size of the table - tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table + tableShardSize = tableSize / tableShardCnt // Size of an individual shard + tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. maxMatchLength = 131074 ) @@ -24,8 +26,14 @@ type tableEntry struct { type fastEncoder struct { fastBase - table [tableSize]tableEntry - dictTable []tableEntry + table [tableSize]tableEntry +} + +type fastEncoderDict struct { + fastEncoder + dictTable []tableEntry + tableShardDirty [tableShardCnt]bool + allDirty bool } // Encode mimmics functionality in zstd_fast.c @@ -78,7 +86,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { // TEMPLATE const hashLog = tableBits // seems global, but would be nice to tweak. - const kSearchStrength = 8 + const kSearchStrength = 7 // nextEmit is where in src the next emitLiteral should start from. 
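Reviewer note: the new fastEncoderDict and doubleFastEncoderDict types replace the old "copy the whole dictionary table on every Reset" behaviour with per-shard dirty tracking. Writes go through markShardDirty/markLongShardDirty, and Reset copies back only the dirty shards from the pristine dictTable/dictLongTable snapshot, falling back to a full copy once enough shards are dirty (more than half for the long table, roughly two thirds for the short one). The self-contained sketch below restates that bookkeeping; the type name, the dictShardBits value and the uint32 entries are placeholders of mine, not the package's.

```go
package main

import "fmt"

const (
    tableBits     = 15                               // table has 1<<15 entries, as in enc_fast.go
    dictShardBits = 6                                // defined elsewhere in the package; 6 is an arbitrary stand-in
    shardCnt      = 1 << (tableBits - dictShardBits) // number of shards
    tableSize     = 1 << tableBits
    shardSize     = tableSize / shardCnt // entries per shard
)

type shardedTable struct {
    table      [tableSize]uint32 // stand-in for the []tableEntry hash table
    dictTable  []uint32          // pristine copy built from the dictionary
    shardDirty [shardCnt]bool
}

// markDirty flags the shard containing index i, mirroring markShardDirty.
func (t *shardedTable) markDirty(i uint32) {
    t.shardDirty[i/shardSize] = true
}

// reset restores only the shards that were touched since the last reset,
// falling back to a full copy when many shards are dirty (the real Resets
// use one-half and two-thirds thresholds).
func (t *shardedTable) reset() {
    dirty := 0
    for _, d := range t.shardDirty {
        if d {
            dirty++
        }
    }
    if dirty > shardCnt/2 {
        copy(t.table[:], t.dictTable)
    } else {
        for i, d := range t.shardDirty {
            if !d {
                continue
            }
            copy(t.table[i*shardSize:(i+1)*shardSize], t.dictTable[i*shardSize:(i+1)*shardSize])
        }
    }
    for i := range t.shardDirty {
        t.shardDirty[i] = false
    }
}

func main() {
    t := &shardedTable{dictTable: make([]uint32, tableSize)}
    t.table[123] = 42
    t.markDirty(123)
    t.reset()
    fmt.Println(t.table[123]) // prints 0: the dirtied shard was restored from dictTable
}
```

The size cutoffs in the Encode bodies above (32 KiB for the fast dictionary encoder, 64 KiB for the double-fast one) serve the same goal: once a block is large enough, per-shard tracking is no longer worth it and everything is simply marked dirty.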
nextEmit := s @@ -617,8 +625,322 @@ encodeLoop: } } +// Encode will encode the content, with a dictionary if initialized for it. +func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if e.allDirty || len(src) > 32<<10 { + e.fastEncoder.Encode(blk, src) + e.allDirty = true + return + } + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 7 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hash6(cv, hashLog) + nextHash2 := hash6(cv>>8, hashLog) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + e.markShardDirty(nextHash2) + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + var length int32 + // length = 4 + e.matchlen(s+6, repIndex+4, src) + { + a := src[s+6:] + b := src[repIndex+4:] + endI := len(a) & (math.MaxInt32 - 7) + length = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
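Reviewer note: the dictionary-aware Encode bodies index the input with hash5/hash6/hash8 calls such as hash6(cv, hashLog) and hash8(cv, dFastLongTableBits). Those helpers are defined elsewhere in the package and are not part of this diff; the sketch below only illustrates the general idea they implement, hashing the low n bytes of a 64-bit little-endian load into a table index of the requested width. The multiplier is an arbitrary odd constant of mine, not the package's.

```go
package main

import "fmt"

// hashLowBytes is a generic stand-in for the hash5/hash6/hash8 helpers used
// above: mask off the low n bytes of the 64-bit load, multiply by an odd
// constant, and keep the top `bits` bits of the product as the table index.
func hashLowBytes(u uint64, n, bits uint) uint32 {
    u &= (1 << (8 * n)) - 1      // keep only the low n bytes
    const m = 0x9e3779b97f4a7c15 // any odd 64-bit constant works for a sketch
    return uint32((u * m) >> (64 - bits))
}

func main() {
    cv := uint64(0x0807060504030201)
    fmt.Println(hashLowBytes(cv, 5, 15)) // index into a 1<<15 entry short table
    fmt.Println(hashLowBytes(cv, 8, 17)) // index into a 1<<17 entry long table
}
```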
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + var l int32 + { + a := src[s+4:] + b := src[t+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + var l int32 + { + a := src[s+4:] + b := src[o2+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Store this, since we have it. + nextHash := hash6(cv, hashLog) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. 
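Reviewer note: in fastEncoderDict.Encode the match-length computation is inlined (the commented-out `// length = 4 + e.matchlen(...)` lines show what it replaces). It compares eight bytes at a time and, on the first mismatching word, uses the trailing-zero count of the XOR to locate the exact differing byte. The standalone function below shows the same idea with plain slice handling; the vendored loops instead stop at a multiple of eight and rely on the encoder's input margin, so treat this as a simplified restatement rather than the patched code.

```go
package main

import (
    "encoding/binary"
    "fmt"
    "math/bits"
)

// matchLen returns the length of the common prefix of a and b, comparing
// 8 bytes at a time: XOR the two words and use the trailing zero count to
// find the first differing byte, as the inlined loops above do.
func matchLen(a, b []byte) int {
    n := 0
    for len(a) >= 8 && len(b) >= 8 {
        x := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
        if x != 0 {
            return n + bits.TrailingZeros64(x)>>3
        }
        a, b = a[8:], b[8:]
        n += 8
    }
    for i := 0; i < len(a) && i < len(b); i++ {
        if a[i] != b[i] {
            break
        }
        n++
    }
    return n
}

func main() {
    fmt.Println(matchLen([]byte("zstandard!!"), []byte("zstandard??"))) // 9
}
```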
+ seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + // ResetDict will reset and set a dictionary if not nil func (e *fastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("fastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { e.resetBase(d, singleBlock) if d == nil { return @@ -653,9 +975,44 @@ func (e *fastEncoder) Reset(d *dict, singleBlock bool) { } } e.lastDictID = d.id + e.allDirty = true } e.cur = e.maxMatchOff - // Reset table to initial state - copy(e.table[:], e.dictTable) + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.tableShardDirty { + if e.tableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + const shardCnt = tableShardCnt + const shardSize = tableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.table[:], e.dictTable) + for i := range e.tableShardDirty { + e.tableShardDirty[i] = false + } + e.allDirty = false + return + } + for i := range e.tableShardDirty { + if !e.tableShardDirty[i] { + continue + } + + copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + e.tableShardDirty[i] = false + } + e.allDirty = false +} + +func (e *fastEncoderDict) markAllShardsDirty() { + e.allDirty = true +} + +func (e *fastEncoderDict) markShardDirty(entryNum uint32) { + e.tableShardDirty[entryNum/tableShardSize] = true } diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index f5759211dac..4871dd03aff 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -106,7 +106,7 @@ func (e *Encoder) Reset(w io.Writer) { s.encoder = e.o.encoder() } if s.writing == nil { - s.writing = &blockEnc{} + s.writing = &blockEnc{lowMem: e.o.lowMem} s.writing.init() } s.writing.initNewEncode() @@ -176,6 +176,12 @@ func (e *Encoder) nextBlock(final bool) error { } if !s.headerWritten { // If we have a single block encode, do a sync compression. + if final && len(s.filling) == 0 && !e.o.fullZero { + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } if final && len(s.filling) > 0 { s.current = e.EncodeAll(s.filling, s.current[:0]) var n2 int @@ -334,13 +340,13 @@ func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { println("ReadFrom: got EOF final block:", len(e.state.filling)) } return n, nil + case nil: default: if debug { println("ReadFrom: got error:", err) } e.state.err = err return n, err - case nil: } if len(src) > 0 { if debug { @@ -471,7 +477,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } // If less than 1MB, allocate a buffer up front. 
- if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 { + if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { dst = make([]byte, 0, len(src)) } dst, err := fh.appendTo(dst) diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index a7312f42af7..16d4ab63c19 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -24,12 +24,12 @@ type encoderOptions struct { allLitEntropy bool customWindow bool customALEntropy bool + lowMem bool dict *dict } func (o *encoderOptions) setDefault() { *o = encoderOptions{ - // use less ram: true for now, but may change. concurrent: runtime.GOMAXPROCS(0), crc: true, single: nil, @@ -37,20 +37,31 @@ func (o *encoderOptions) setDefault() { windowSize: 8 << 20, level: SpeedDefault, allLitEntropy: true, + lowMem: false, } } // encoder returns an encoder with the selected options. func (o encoderOptions) encoder() encoder { switch o.level { + case SpeedFastest: + if o.dict != nil { + return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + } + return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + case SpeedDefault: - return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}}} + if o.dict != nil { + return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}} + } + return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} case SpeedBetterCompression: - return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}} + if o.dict != nil { + return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + } + return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} case SpeedBestCompression: - return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}} - case SpeedFastest: - return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}} + return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} } panic("unknown compression level") } @@ -62,7 +73,7 @@ func WithEncoderCRC(b bool) EOption { } // WithEncoderConcurrency will set the concurrency, -// meaning the maximum number of decoders to run concurrently. +// meaning the maximum number of encoders to run concurrently. // The value supplied must be at least 1. // By default this will be set to GOMAXPROCS. func WithEncoderConcurrency(n int) EOption { @@ -276,6 +287,17 @@ func WithSingleSegment(b bool) EOption { } } +// WithLowerEncoderMem will trade in some memory cases trade less memory usage for +// slower encoding speed. +// This will not change the window size which is the primary function for reducing +// memory usage. See WithWindowSize. +func WithLowerEncoderMem(b bool) EOption { + return func(o *encoderOptions) error { + o.lowMem = b + return nil + } +} + // WithEncoderDict allows to register a dictionary that will be used for the encode. // The encoder *may* choose to use no dictionary instead for certain payloads. 
func WithEncoderDict(dict []byte) EOption { diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index fc4a566d39a..693c5f05d2d 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -121,7 +121,7 @@ func (d *frameDec) reset(br byteBuffer) error { d.SingleSegment = fhd&(1<<5) != 0 if fhd&(1<<3) != 0 { - return errors.New("Reserved bit set on frame header") + return errors.New("reserved bit set on frame header") } // Read Window_Descriptor diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go index aa9eba88b80..c74681b9998 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -97,7 +97,7 @@ func (s *fseEncoder) prepare() (*fseEncoder, error) { func (s *fseEncoder) allocCtable() { tableSize := 1 << s.actualTableLog // get tableSymbol that is big enough. - if cap(s.ct.tableSymbol) < int(tableSize) { + if cap(s.ct.tableSymbol) < tableSize { s.ct.tableSymbol = make([]byte, tableSize) } s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] @@ -202,13 +202,13 @@ func (s *fseEncoder) buildCTable() error { case 0: case -1, 1: symbolTT[i].deltaNbBits = tl - symbolTT[i].deltaFindState = int16(total - 1) + symbolTT[i].deltaFindState = total - 1 total++ default: maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) minStatePlus := uint32(v) << maxBitsOut symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus - symbolTT[i].deltaFindState = int16(total - v) + symbolTT[i].deltaFindState = total - v total += v } } @@ -353,8 +353,8 @@ func (s *fseEncoder) normalizeCount2(length int) error { distributed uint32 total = uint32(length) tableLog = s.actualTableLog - lowThreshold = uint32(total >> tableLog) - lowOne = uint32((total * 3) >> (tableLog + 1)) + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) ) for i, cnt := range s.count[:s.symbolLen] { if cnt == 0 { @@ -379,7 +379,7 @@ func (s *fseEncoder) normalizeCount2(length int) error { if (total / toDistribute) > lowOne { // risk of rounding to zero - lowOne = uint32((total * 3) / (toDistribute * 2)) + lowOne = (total * 3) / (toDistribute * 2) for i, cnt := range s.count[:s.symbolLen] { if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { s.norm[i] = 1 @@ -708,7 +708,6 @@ func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { im := int32((nbBitsOut << 16) - first.deltaNbBits) lu := (im >> nbBitsOut) + int32(first.deltaFindState) c.state = c.stateTable[lu] - return } // encode the output symbol provided and write it to the bitstream. 
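Reviewer note: with the encoder_options.go hunk above, SpeedFastest, SpeedDefault and SpeedBetterCompression now construct dedicated *Dict encoder variants whenever a dictionary is registered, and the new WithLowerEncoderMem option threads a lowMem flag through the encoders and block buffers. Below is a minimal caller-side sketch of those options; NewWriter, EncodeAll, WithEncoderLevel and WithEncoderDict are the package's existing public API, while the nil dictionary and the sample input are placeholders.

```go
package main

import (
    "fmt"

    "github.com/klauspost/compress/zstd"
)

func main() {
    // dict would normally hold a real zstd dictionary (for example one produced
    // by `zstd --train`); nil simply means "no dictionary" in this sketch.
    var dict []byte

    opts := []zstd.EOption{
        zstd.WithEncoderLevel(zstd.SpeedFastest), // picks the *Dict encoder variant when a dict is set
        zstd.WithLowerEncoderMem(true),           // new option introduced by this bump
    }
    if dict != nil {
        opts = append(opts, zstd.WithEncoderDict(dict))
    }

    enc, err := zstd.NewWriter(nil, opts...)
    if err != nil {
        panic(err)
    }

    compressed := enc.EncodeAll([]byte("hello, hello, hello"), nil)
    fmt.Println(len(compressed))
}
```

With a nil writer the Encoder is only useful through EncodeAll, the stateless pattern that the EncodeAll hunk above tunes for small inputs (it now skips the up-front buffer when lowMem is set).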
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go index 6c17dc17f4f..474cb77d2b9 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go @@ -59,7 +59,7 @@ func fillBase(dst []baseOffset, base uint32, bits ...uint8) { } for i, bit := range bits { if base > math.MaxInt32 { - panic(fmt.Sprintf("invalid decoding table, base overflows int32")) + panic("invalid decoding table, base overflows int32") } dst[i] = baseOffset{ diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go index 36bcc3cc02e..8014174a771 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqenc.go +++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go @@ -35,7 +35,6 @@ func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { // Ensure we cannot reuse by accident prevEnc := *prev prevEnc.symbolLen = 0 - return } compareSwap(ll, &s.llEnc, &s.llPrev) compareSwap(ml, &s.mlEnc, &s.mlPrev) diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go index 841fd95acce..9d9d1d567e6 100644 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -10,8 +10,8 @@ import ( "hash/crc32" "io" + "github.com/golang/snappy" "github.com/klauspost/compress/huff0" - "github.com/klauspost/compress/snappy" ) const ( @@ -185,7 +185,6 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { r.block.reset(nil) r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) if err != nil { - println("snappy.Decode:", err) return written, err } err = r.block.encodeLits(r.block.literals, false) @@ -417,7 +416,7 @@ var crcTable = crc32.MakeTable(crc32.Castagnoli) // https://github.com/google/snappy/blob/master/framing_format.txt func snappyCRC(b []byte) uint32 { c := crc32.Update(0, crcTable, b) - return uint32(c>>15|c<<17) + 0xa282ead8 + return c>>15 | c<<17 + 0xa282ead8 } // snappyDecodedLen returns the length of the decoded block and the number of bytes diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 9056beef271..1ba308c8bf7 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -5,6 +5,7 @@ package zstd import ( "bytes" + "encoding/binary" "errors" "log" "math" @@ -126,26 +127,15 @@ func matchLen(a, b []byte) int { } func load3232(b []byte, i int32) uint32 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:4] - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + return binary.LittleEndian.Uint32(b[i:]) } func load6432(b []byte, i int32) uint64 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:8] - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + return binary.LittleEndian.Uint64(b[i:]) } func load64(b []byte, i int) uint64 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. 
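Reviewer note: the zstd.go hunk in progress here rewrites load3232, load6432 and load64 to use binary.LittleEndian reads instead of hand-written byte assembly over the same bytes. The snippet below is only a quick standalone check, added for this bump, that the two formulations agree; it is not part of the patch.

```go
package main

import (
    "encoding/binary"
    "fmt"
)

func main() {
    b := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}
    i := 1

    // Old style: assemble the little-endian value byte by byte.
    manual := uint64(b[i]) | uint64(b[i+1])<<8 | uint64(b[i+2])<<16 | uint64(b[i+3])<<24 |
        uint64(b[i+4])<<32 | uint64(b[i+5])<<40 | uint64(b[i+6])<<48 | uint64(b[i+7])<<56

    // New style, as in the load6432/load64 rewrite.
    viaBinary := binary.LittleEndian.Uint64(b[i:])

    fmt.Println(manual == viaBinary) // true
}
```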
- b = b[i:] - b = b[:8] - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + return binary.LittleEndian.Uint64(b[i:]) } type byter interface { diff --git a/vendor/github.com/magefile/mage/LICENSE b/vendor/github.com/magefile/mage/LICENSE deleted file mode 100644 index d0632bc1458..00000000000 --- a/vendor/github.com/magefile/mage/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2017 the Mage authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/magefile/mage/mg/color.go b/vendor/github.com/magefile/mage/mg/color.go deleted file mode 100644 index 3e27103325a..00000000000 --- a/vendor/github.com/magefile/mage/mg/color.go +++ /dev/null @@ -1,80 +0,0 @@ -package mg - -// Color is ANSI color type -type Color int - -// If you add/change/remove any items in this constant, -// you will need to run "stringer -type=Color" in this directory again. -// NOTE: Please keep the list in an alphabetical order. -const ( - Black Color = iota - Red - Green - Yellow - Blue - Magenta - Cyan - White - BrightBlack - BrightRed - BrightGreen - BrightYellow - BrightBlue - BrightMagenta - BrightCyan - BrightWhite -) - -// AnsiColor are ANSI color codes for supported terminal colors. -var ansiColor = map[Color]string{ - Black: "\u001b[30m", - Red: "\u001b[31m", - Green: "\u001b[32m", - Yellow: "\u001b[33m", - Blue: "\u001b[34m", - Magenta: "\u001b[35m", - Cyan: "\u001b[36m", - White: "\u001b[37m", - BrightBlack: "\u001b[30;1m", - BrightRed: "\u001b[31;1m", - BrightGreen: "\u001b[32;1m", - BrightYellow: "\u001b[33;1m", - BrightBlue: "\u001b[34;1m", - BrightMagenta: "\u001b[35;1m", - BrightCyan: "\u001b[36;1m", - BrightWhite: "\u001b[37;1m", -} - -// AnsiColorReset is an ANSI color code to reset the terminal color. -const AnsiColorReset = "\033[0m" - -// DefaultTargetAnsiColor is a default ANSI color for colorizing targets. -// It is set to Cyan as an arbitrary color, because it has a neutral meaning -var DefaultTargetAnsiColor = ansiColor[Cyan] - -func toLowerCase(s string) string { - // this is a naive implementation - // borrowed from https://golang.org/src/strings/strings.go - // and only considers alphabetical characters [a-zA-Z] - // so that we don't depend on the "strings" package - buf := make([]byte, len(s)) - for i := 0; i < len(s); i++ { - c := s[i] - if 'A' <= c && c <= 'Z' { - c += 'a' - 'A' - } - buf[i] = c - } - return string(buf) -} - -func getAnsiColor(color string) (string, bool) { - colorLower := toLowerCase(color) - for k, v := range ansiColor { - colorConstLower := toLowerCase(k.String()) - if colorConstLower == colorLower { - return v, true - } - } - return "", false -} diff --git a/vendor/github.com/magefile/mage/mg/color_string.go b/vendor/github.com/magefile/mage/mg/color_string.go deleted file mode 100644 index 06debca5404..00000000000 --- a/vendor/github.com/magefile/mage/mg/color_string.go +++ /dev/null @@ -1,38 +0,0 @@ -// Code generated by "stringer -type=Color"; DO NOT EDIT. - -package mg - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[Black-0] - _ = x[Red-1] - _ = x[Green-2] - _ = x[Yellow-3] - _ = x[Blue-4] - _ = x[Magenta-5] - _ = x[Cyan-6] - _ = x[White-7] - _ = x[BrightBlack-8] - _ = x[BrightRed-9] - _ = x[BrightGreen-10] - _ = x[BrightYellow-11] - _ = x[BrightBlue-12] - _ = x[BrightMagenta-13] - _ = x[BrightCyan-14] - _ = x[BrightWhite-15] -} - -const _Color_name = "BlackRedGreenYellowBlueMagentaCyanWhiteBrightBlackBrightRedBrightGreenBrightYellowBrightBlueBrightMagentaBrightCyanBrightWhite" - -var _Color_index = [...]uint8{0, 5, 8, 13, 19, 23, 30, 34, 39, 50, 59, 70, 82, 92, 105, 115, 126} - -func (i Color) String() string { - if i < 0 || i >= Color(len(_Color_index)-1) { - return "Color(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Color_name[_Color_index[i]:_Color_index[i+1]] -} diff --git a/vendor/github.com/magefile/mage/mg/deps.go b/vendor/github.com/magefile/mage/mg/deps.go deleted file mode 100644 index cae591d340d..00000000000 --- a/vendor/github.com/magefile/mage/mg/deps.go +++ /dev/null @@ -1,204 +0,0 @@ -package mg - -import ( - "context" - "fmt" - "log" - "os" - "reflect" - "runtime" - "strings" - "sync" -) - -var logger = log.New(os.Stderr, "", 0) - -type onceMap struct { - mu *sync.Mutex - m map[onceKey]*onceFun -} - -type onceKey struct { - Name string - ID string -} - -func (o *onceMap) LoadOrStore(f Fn) *onceFun { - defer o.mu.Unlock() - o.mu.Lock() - - key := onceKey{ - Name: f.Name(), - ID: f.ID(), - } - existing, ok := o.m[key] - if ok { - return existing - } - one := &onceFun{ - once: &sync.Once{}, - fn: f, - displayName: displayName(f.Name()), - } - o.m[key] = one - return one -} - -var onces = &onceMap{ - mu: &sync.Mutex{}, - m: map[onceKey]*onceFun{}, -} - -// SerialDeps is like Deps except it runs each dependency serially, instead of -// in parallel. This can be useful for resource intensive dependencies that -// shouldn't be run at the same time. -func SerialDeps(fns ...interface{}) { - funcs := checkFns(fns) - ctx := context.Background() - for i := range fns { - runDeps(ctx, funcs[i:i+1]) - } -} - -// SerialCtxDeps is like CtxDeps except it runs each dependency serially, -// instead of in parallel. This can be useful for resource intensive -// dependencies that shouldn't be run at the same time. -func SerialCtxDeps(ctx context.Context, fns ...interface{}) { - funcs := checkFns(fns) - for i := range fns { - runDeps(ctx, funcs[i:i+1]) - } -} - -// CtxDeps runs the given functions as dependencies of the calling function. -// Dependencies must only be of type: -// func() -// func() error -// func(context.Context) -// func(context.Context) error -// Or a similar method on a mg.Namespace type. -// Or an mg.Fn interface. -// -// The function calling Deps is guaranteed that all dependent functions will be -// run exactly once when Deps returns. Dependent functions may in turn declare -// their own dependencies using Deps. Each dependency is run in their own -// goroutines. Each function is given the context provided if the function -// prototype allows for it. -func CtxDeps(ctx context.Context, fns ...interface{}) { - funcs := checkFns(fns) - runDeps(ctx, funcs) -} - -// runDeps assumes you've already called checkFns. 
-func runDeps(ctx context.Context, fns []Fn) { - mu := &sync.Mutex{} - var errs []string - var exit int - wg := &sync.WaitGroup{} - for _, f := range fns { - fn := onces.LoadOrStore(f) - wg.Add(1) - go func() { - defer func() { - if v := recover(); v != nil { - mu.Lock() - if err, ok := v.(error); ok { - exit = changeExit(exit, ExitStatus(err)) - } else { - exit = changeExit(exit, 1) - } - errs = append(errs, fmt.Sprint(v)) - mu.Unlock() - } - wg.Done() - }() - if err := fn.run(ctx); err != nil { - mu.Lock() - errs = append(errs, fmt.Sprint(err)) - exit = changeExit(exit, ExitStatus(err)) - mu.Unlock() - } - }() - } - - wg.Wait() - if len(errs) > 0 { - panic(Fatal(exit, strings.Join(errs, "\n"))) - } -} - -func checkFns(fns []interface{}) []Fn { - funcs := make([]Fn, len(fns)) - for i, f := range fns { - if fn, ok := f.(Fn); ok { - funcs[i] = fn - continue - } - funcs[i] = F(f) - } - return funcs -} - -// Deps runs the given functions in parallel, exactly once. Dependencies must -// only be of type: -// func() -// func() error -// func(context.Context) -// func(context.Context) error -// Or a similar method on a mg.Namespace type. -// Or an mg.Fn interface. -// -// This is a way to build up a tree of dependencies with each dependency -// defining its own dependencies. Functions must have the same signature as a -// Mage target, i.e. optional context argument, optional error return. -func Deps(fns ...interface{}) { - CtxDeps(context.Background(), fns...) -} - -func changeExit(old, new int) int { - if new == 0 { - return old - } - if old == 0 { - return new - } - if old == new { - return old - } - // both different and both non-zero, just set - // exit to 1. Nothing more we can do. - return 1 -} - -// funcName returns the unique name for the function -func funcName(i interface{}) string { - return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() -} - -func displayName(name string) string { - splitByPackage := strings.Split(name, ".") - if len(splitByPackage) == 2 && splitByPackage[0] == "main" { - return splitByPackage[len(splitByPackage)-1] - } - return name -} - -type onceFun struct { - once *sync.Once - fn Fn - err error - - displayName string -} - -// run will run the function exactly once and capture the error output. Further runs simply return -// the same error output. -func (o *onceFun) run(ctx context.Context) error { - o.once.Do(func() { - if Verbose() { - logger.Println("Running dependency:", displayName(o.fn.Name())) - } - o.err = o.fn.Run(ctx) - }) - return o.err -} diff --git a/vendor/github.com/magefile/mage/mg/errors.go b/vendor/github.com/magefile/mage/mg/errors.go deleted file mode 100644 index 2dd780fe3db..00000000000 --- a/vendor/github.com/magefile/mage/mg/errors.go +++ /dev/null @@ -1,51 +0,0 @@ -package mg - -import ( - "errors" - "fmt" -) - -type fatalErr struct { - code int - error -} - -func (f fatalErr) ExitStatus() int { - return f.code -} - -type exitStatus interface { - ExitStatus() int -} - -// Fatal returns an error that will cause mage to print out the -// given args and exit with the given exit code. -func Fatal(code int, args ...interface{}) error { - return fatalErr{ - code: code, - error: errors.New(fmt.Sprint(args...)), - } -} - -// Fatalf returns an error that will cause mage to print out the -// given message and exit with the given exit code. -func Fatalf(code int, format string, args ...interface{}) error { - return fatalErr{ - code: code, - error: fmt.Errorf(format, args...), - } -} - -// ExitStatus queries the error for an exit status. 
If the error is nil, it -// returns 0. If the error does not implement ExitStatus() int, it returns 1. -// Otherwise it retiurns the value from ExitStatus(). -func ExitStatus(err error) int { - if err == nil { - return 0 - } - exit, ok := err.(exitStatus) - if !ok { - return 1 - } - return exit.ExitStatus() -} diff --git a/vendor/github.com/magefile/mage/mg/fn.go b/vendor/github.com/magefile/mage/mg/fn.go deleted file mode 100644 index 57376081139..00000000000 --- a/vendor/github.com/magefile/mage/mg/fn.go +++ /dev/null @@ -1,181 +0,0 @@ -package mg - -import ( - "context" - "encoding/json" - "fmt" - "reflect" - "time" -) - -// Fn represents a function that can be run with mg.Deps. Package, Name, and ID must combine to -// uniquely identify a function, while ensuring the "same" function has identical values. These are -// used as a map key to find and run (or not run) the function. -type Fn interface { - // Name should return the fully qualified name of the function. Usually - // it's best to use runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name(). - Name() string - - // ID should be an additional uniqueness qualifier in case the name is insufficiently unique. - // This can be the case for functions that take arguments (mg.F json-encodes an array of the - // args). - ID() string - - // Run should run the function. - Run(ctx context.Context) error -} - -// F takes a function that is compatible as a mage target, and any args that need to be passed to -// it, and wraps it in an mg.Fn that mg.Deps can run. Args must be passed in the same order as they -// are declared by the function. Note that you do not need to and should not pass a context.Context -// to F, even if the target takes a context. Compatible args are int, bool, string, and -// time.Duration. -func F(target interface{}, args ...interface{}) Fn { - hasContext, isNamespace, err := checkF(target, args) - if err != nil { - panic(err) - } - id, err := json.Marshal(args) - if err != nil { - panic(fmt.Errorf("can't convert args into a mage-compatible id for mg.Deps: %s", err)) - } - return fn{ - name: funcName(target), - id: string(id), - f: func(ctx context.Context) error { - v := reflect.ValueOf(target) - count := len(args) - if hasContext { - count++ - } - if isNamespace { - count++ - } - vargs := make([]reflect.Value, count) - x := 0 - if isNamespace { - vargs[0] = reflect.ValueOf(struct{}{}) - x++ - } - if hasContext { - vargs[x] = reflect.ValueOf(ctx) - x++ - } - for y := range args { - vargs[x+y] = reflect.ValueOf(args[y]) - } - ret := v.Call(vargs) - if len(ret) > 0 { - // we only allow functions with a single error return, so this should be safe. - if ret[0].IsNil() { - return nil - } - return ret[0].Interface().(error) - } - return nil - }, - } -} - -type fn struct { - name string - id string - f func(ctx context.Context) error -} - -// Name returns the fully qualified name of the function. -func (f fn) Name() string { - return f.name -} - -// ID returns a hash of the argument values passed in -func (f fn) ID() string { - return f.id -} - -// Run runs the function. 
-func (f fn) Run(ctx context.Context) error { - return f.f(ctx) -} - -func checkF(target interface{}, args []interface{}) (hasContext, isNamespace bool, _ error) { - t := reflect.TypeOf(target) - if t.Kind() != reflect.Func { - return false, false, fmt.Errorf("non-function passed to mg.F: %T", target) - } - - if t.NumOut() > 1 { - return false, false, fmt.Errorf("target has too many return values, must be zero or just an error: %T", target) - } - if t.NumOut() == 1 && t.Out(0) != errType { - return false, false, fmt.Errorf("target's return value is not an error") - } - - // more inputs than slots is always an error - if len(args) > t.NumIn() { - return false, false, fmt.Errorf("too many arguments for target, got %d for %T", len(args), target) - } - - if t.NumIn() == 0 { - return false, false, nil - } - - x := 0 - inputs := t.NumIn() - - if t.In(0).AssignableTo(emptyType) { - // nameSpace func - isNamespace = true - x++ - // callers must leave off the namespace value - inputs-- - } - if t.NumIn() > x && t.In(x) == ctxType { - // callers must leave off the context - inputs-- - - // let the upper function know it should pass us a context. - hasContext = true - - // skip checking the first argument in the below loop if it's a context, since first arg is - // special. - x++ - } - - if len(args) != inputs { - return false, false, fmt.Errorf("wrong number of arguments for target, got %d for %T", len(args), target) - } - - for _, arg := range args { - argT := t.In(x) - if !argTypes[argT] { - return false, false, fmt.Errorf("argument %d (%s), is not a supported argument type", x, argT) - } - passedT := reflect.TypeOf(arg) - if argT != passedT { - return false, false, fmt.Errorf("argument %d expected to be %s, but is %s", x, argT, passedT) - } - x++ - } - return hasContext, isNamespace, nil -} - -// Here we define the types that are supported as arguments/returns -var ( - ctxType = reflect.TypeOf(func(context.Context) {}).In(0) - errType = reflect.TypeOf(func() error { return nil }).Out(0) - emptyType = reflect.TypeOf(struct{}{}) - - intType = reflect.TypeOf(int(0)) - stringType = reflect.TypeOf(string("")) - boolType = reflect.TypeOf(bool(false)) - durType = reflect.TypeOf(time.Second) - - // don't put ctx in here, this is for non-context types - argTypes = map[reflect.Type]bool{ - intType: true, - boolType: true, - stringType: true, - durType: true, - } -) diff --git a/vendor/github.com/magefile/mage/mg/runtime.go b/vendor/github.com/magefile/mage/mg/runtime.go deleted file mode 100644 index 9a8de12ce71..00000000000 --- a/vendor/github.com/magefile/mage/mg/runtime.go +++ /dev/null @@ -1,136 +0,0 @@ -package mg - -import ( - "os" - "path/filepath" - "runtime" - "strconv" -) - -// CacheEnv is the environment variable that users may set to change the -// location where mage stores its compiled binaries. -const CacheEnv = "MAGEFILE_CACHE" - -// VerboseEnv is the environment variable that indicates the user requested -// verbose mode when running a magefile. -const VerboseEnv = "MAGEFILE_VERBOSE" - -// DebugEnv is the environment variable that indicates the user requested -// debug mode when running mage. -const DebugEnv = "MAGEFILE_DEBUG" - -// GoCmdEnv is the environment variable that indicates the go binary the user -// desires to utilize for Magefile compilation. -const GoCmdEnv = "MAGEFILE_GOCMD" - -// IgnoreDefaultEnv is the environment variable that indicates the user requested -// to ignore the default target specified in the magefile. 
-const IgnoreDefaultEnv = "MAGEFILE_IGNOREDEFAULT" - -// HashFastEnv is the environment variable that indicates the user requested to -// use a quick hash of magefiles to determine whether or not the magefile binary -// needs to be rebuilt. This results in faster runtimes, but means that mage -// will fail to rebuild if a dependency has changed. To force a rebuild, run -// mage with the -f flag. -const HashFastEnv = "MAGEFILE_HASHFAST" - -// EnableColorEnv is the environment variable that indicates the user is using -// a terminal which supports a color output. The default is false for backwards -// compatibility. When the value is true and the detected terminal does support colors -// then the list of mage targets will be displayed in ANSI color. When the value -// is true but the detected terminal does not support colors, then the list of -// mage targets will be displayed in the default colors (e.g. black and white). -const EnableColorEnv = "MAGEFILE_ENABLE_COLOR" - -// TargetColorEnv is the environment variable that indicates which ANSI color -// should be used to colorize mage targets. This is only applicable when -// the MAGEFILE_ENABLE_COLOR environment variable is true. -// The supported ANSI color names are any of these: -// - Black -// - Red -// - Green -// - Yellow -// - Blue -// - Magenta -// - Cyan -// - White -// - BrightBlack -// - BrightRed -// - BrightGreen -// - BrightYellow -// - BrightBlue -// - BrightMagenta -// - BrightCyan -// - BrightWhite -const TargetColorEnv = "MAGEFILE_TARGET_COLOR" - -// Verbose reports whether a magefile was run with the verbose flag. -func Verbose() bool { - b, _ := strconv.ParseBool(os.Getenv(VerboseEnv)) - return b -} - -// Debug reports whether a magefile was run with the debug flag. -func Debug() bool { - b, _ := strconv.ParseBool(os.Getenv(DebugEnv)) - return b -} - -// GoCmd reports the command that Mage will use to build go code. By default mage runs -// the "go" binary in the PATH. -func GoCmd() string { - if cmd := os.Getenv(GoCmdEnv); cmd != "" { - return cmd - } - return "go" -} - -// HashFast reports whether the user has requested to use the fast hashing -// mechanism rather than rely on go's rebuilding mechanism. -func HashFast() bool { - b, _ := strconv.ParseBool(os.Getenv(HashFastEnv)) - return b -} - -// IgnoreDefault reports whether the user has requested to ignore the default target -// in the magefile. -func IgnoreDefault() bool { - b, _ := strconv.ParseBool(os.Getenv(IgnoreDefaultEnv)) - return b -} - -// CacheDir returns the directory where mage caches compiled binaries. It -// defaults to $HOME/.magefile, but may be overridden by the MAGEFILE_CACHE -// environment variable. -func CacheDir() string { - d := os.Getenv(CacheEnv) - if d != "" { - return d - } - switch runtime.GOOS { - case "windows": - return filepath.Join(os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"), "magefile") - default: - return filepath.Join(os.Getenv("HOME"), ".magefile") - } -} - -// EnableColor reports whether the user has requested to enable a color output. -func EnableColor() bool { - b, _ := strconv.ParseBool(os.Getenv(EnableColorEnv)) - return b -} - -// TargetColor returns the configured ANSI color name a color output. 
-func TargetColor() string { - s, exists := os.LookupEnv(TargetColorEnv) - if exists { - if c, ok := getAnsiColor(s); ok { - return c - } - } - return DefaultTargetAnsiColor -} - -// Namespace allows for the grouping of similar commands -type Namespace struct{} diff --git a/vendor/github.com/magefile/mage/sh/cmd.go b/vendor/github.com/magefile/mage/sh/cmd.go deleted file mode 100644 index 06af62de2d2..00000000000 --- a/vendor/github.com/magefile/mage/sh/cmd.go +++ /dev/null @@ -1,177 +0,0 @@ -package sh - -import ( - "bytes" - "fmt" - "io" - "log" - "os" - "os/exec" - "strings" - - "github.com/magefile/mage/mg" -) - -// RunCmd returns a function that will call Run with the given command. This is -// useful for creating command aliases to make your scripts easier to read, like -// this: -// -// // in a helper file somewhere -// var g0 = sh.RunCmd("go") // go is a keyword :( -// -// // somewhere in your main code -// if err := g0("install", "github.com/gohugo/hugo"); err != nil { -// return err -// } -// -// Args passed to command get baked in as args to the command when you run it. -// Any args passed in when you run the returned function will be appended to the -// original args. For example, this is equivalent to the above: -// -// var goInstall = sh.RunCmd("go", "install") goInstall("github.com/gohugo/hugo") -// -// RunCmd uses Exec underneath, so see those docs for more details. -func RunCmd(cmd string, args ...string) func(args ...string) error { - return func(args2 ...string) error { - return Run(cmd, append(args, args2...)...) - } -} - -// OutCmd is like RunCmd except the command returns the output of the -// command. -func OutCmd(cmd string, args ...string) func(args ...string) (string, error) { - return func(args2 ...string) (string, error) { - return Output(cmd, append(args, args2...)...) - } -} - -// Run is like RunWith, but doesn't specify any environment variables. -func Run(cmd string, args ...string) error { - return RunWith(nil, cmd, args...) -} - -// RunV is like Run, but always sends the command's stdout to os.Stdout. -func RunV(cmd string, args ...string) error { - _, err := Exec(nil, os.Stdout, os.Stderr, cmd, args...) - return err -} - -// RunWith runs the given command, directing stderr to this program's stderr and -// printing stdout to stdout if mage was run with -v. It adds adds env to the -// environment variables for the command being run. Environment variables should -// be in the format name=value. -func RunWith(env map[string]string, cmd string, args ...string) error { - var output io.Writer - if mg.Verbose() { - output = os.Stdout - } - _, err := Exec(env, output, os.Stderr, cmd, args...) - return err -} - -// RunWithV is like RunWith, but always sends the command's stdout to os.Stdout. -func RunWithV(env map[string]string, cmd string, args ...string) error { - _, err := Exec(env, os.Stdout, os.Stderr, cmd, args...) - return err -} - -// Output runs the command and returns the text from stdout. -func Output(cmd string, args ...string) (string, error) { - buf := &bytes.Buffer{} - _, err := Exec(nil, buf, os.Stderr, cmd, args...) - return strings.TrimSuffix(buf.String(), "\n"), err -} - -// OutputWith is like RunWith, but returns what is written to stdout. -func OutputWith(env map[string]string, cmd string, args ...string) (string, error) { - buf := &bytes.Buffer{} - _, err := Exec(env, buf, os.Stderr, cmd, args...) 
- return strings.TrimSuffix(buf.String(), "\n"), err -} - -// Exec executes the command, piping its stderr to mage's stderr and -// piping its stdout to the given writer. If the command fails, it will return -// an error that, if returned from a target or mg.Deps call, will cause mage to -// exit with the same code as the command failed with. Env is a list of -// environment variables to set when running the command, these override the -// current environment variables set (which are also passed to the command). cmd -// and args may include references to environment variables in $FOO format, in -// which case these will be expanded before the command is run. -// -// Ran reports if the command ran (rather than was not found or not executable). -// Code reports the exit code the command returned if it ran. If err == nil, ran -// is always true and code is always 0. -func Exec(env map[string]string, stdout, stderr io.Writer, cmd string, args ...string) (ran bool, err error) { - expand := func(s string) string { - s2, ok := env[s] - if ok { - return s2 - } - return os.Getenv(s) - } - cmd = os.Expand(cmd, expand) - for i := range args { - args[i] = os.Expand(args[i], expand) - } - ran, code, err := run(env, stdout, stderr, cmd, args...) - if err == nil { - return true, nil - } - if ran { - return ran, mg.Fatalf(code, `running "%s %s" failed with exit code %d`, cmd, strings.Join(args, " "), code) - } - return ran, fmt.Errorf(`failed to run "%s %s: %v"`, cmd, strings.Join(args, " "), err) -} - -func run(env map[string]string, stdout, stderr io.Writer, cmd string, args ...string) (ran bool, code int, err error) { - c := exec.Command(cmd, args...) - c.Env = os.Environ() - for k, v := range env { - c.Env = append(c.Env, k+"="+v) - } - c.Stderr = stderr - c.Stdout = stdout - c.Stdin = os.Stdin - log.Println("exec:", cmd, strings.Join(args, " ")) - err = c.Run() - return CmdRan(err), ExitStatus(err), err -} - -// CmdRan examines the error to determine if it was generated as a result of a -// command running via os/exec.Command. If the error is nil, or the command ran -// (even if it exited with a non-zero exit code), CmdRan reports true. If the -// error is an unrecognized type, or it is an error from exec.Command that says -// the command failed to run (usually due to the command not existing or not -// being executable), it reports false. -func CmdRan(err error) bool { - if err == nil { - return true - } - ee, ok := err.(*exec.ExitError) - if ok { - return ee.Exited() - } - return false -} - -type exitStatus interface { - ExitStatus() int -} - -// ExitStatus returns the exit status of the error if it is an exec.ExitError -// or if it implements ExitStatus() int. -// 0 if it is nil or 1 if it is a different error. -func ExitStatus(err error) int { - if err == nil { - return 0 - } - if e, ok := err.(exitStatus); ok { - return e.ExitStatus() - } - if e, ok := err.(*exec.ExitError); ok { - if ex, ok := e.Sys().(exitStatus); ok { - return ex.ExitStatus() - } - } - return 1 -} diff --git a/vendor/github.com/magefile/mage/sh/helpers.go b/vendor/github.com/magefile/mage/sh/helpers.go deleted file mode 100644 index f5d20a2712b..00000000000 --- a/vendor/github.com/magefile/mage/sh/helpers.go +++ /dev/null @@ -1,40 +0,0 @@ -package sh - -import ( - "fmt" - "io" - "os" -) - -// Rm removes the given file or directory even if non-empty. It will not return -// an error if the target doesn't exist, only if the target cannot be removed. 
-func Rm(path string) error { - err := os.RemoveAll(path) - if err == nil || os.IsNotExist(err) { - return nil - } - return fmt.Errorf(`failed to remove %s: %v`, path, err) -} - -// Copy robustly copies the source file to the destination, overwriting the destination if necessary. -func Copy(dst string, src string) error { - from, err := os.Open(src) - if err != nil { - return fmt.Errorf(`can't copy %s: %v`, src, err) - } - defer from.Close() - finfo, err := from.Stat() - if err != nil { - return fmt.Errorf(`can't stat %s: %v`, src, err) - } - to, err := os.OpenFile(dst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, finfo.Mode()) - if err != nil { - return fmt.Errorf(`can't copy to %s: %v`, dst, err) - } - defer to.Close() - _, err = io.Copy(to, from) - if err != nil { - return fmt.Errorf(`error copying %s to %s: %v`, src, dst, err) - } - return nil -} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go index f3871a624ac..3d7fa560b87 100644 --- a/vendor/github.com/mattn/go-runewidth/runewidth.go +++ b/vendor/github.com/mattn/go-runewidth/runewidth.go @@ -12,8 +12,14 @@ var ( // EastAsianWidth will be set true if the current locale is CJK EastAsianWidth bool + // StrictEmojiNeutral should be set false if handle broken fonts + StrictEmojiNeutral bool = true + // DefaultCondition is a condition in current locale - DefaultCondition = &Condition{} + DefaultCondition = &Condition{ + EastAsianWidth: false, + StrictEmojiNeutral: true, + } ) func init() { @@ -83,26 +89,52 @@ var nonprint = table{ // Condition have flag EastAsianWidth whether the current locale is CJK or not. type Condition struct { - EastAsianWidth bool + EastAsianWidth bool + StrictEmojiNeutral bool } // NewCondition return new instance of Condition which is current locale. func NewCondition() *Condition { return &Condition{ - EastAsianWidth: EastAsianWidth, + EastAsianWidth: EastAsianWidth, + StrictEmojiNeutral: StrictEmojiNeutral, } } // RuneWidth returns the number of cells in r. 
// See http://www.unicode.org/reports/tr11/ func (c *Condition) RuneWidth(r rune) int { - switch { - case r < 0 || r > 0x10FFFF || inTables(r, nonprint, combining, notassigned): - return 0 - case (c.EastAsianWidth && IsAmbiguousWidth(r)) || inTables(r, doublewidth): - return 2 - default: - return 1 + // optimized version, verified by TestRuneWidthChecksums() + if !c.EastAsianWidth { + switch { + case r < 0x20 || r > 0x10FFFF: + return 0 + case (r >= 0x7F && r <= 0x9F) || r == 0xAD: // nonprint + return 0 + case r < 0x300: + return 1 + case inTable(r, narrow): + return 1 + case inTables(r, nonprint, combining): + return 0 + case inTable(r, doublewidth): + return 2 + default: + return 1 + } + } else { + switch { + case r < 0 || r > 0x10FFFF || inTables(r, nonprint, combining): + return 0 + case inTable(r, narrow): + return 1 + case inTables(r, ambiguous, doublewidth): + return 2 + case !c.StrictEmojiNeutral && inTables(r, ambiguous, emoji, narrow): + return 2 + default: + return 1 + } } } diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_table.go b/vendor/github.com/mattn/go-runewidth/runewidth_table.go index b27d77d8911..e5d890c266f 100644 --- a/vendor/github.com/mattn/go-runewidth/runewidth_table.go +++ b/vendor/github.com/mattn/go-runewidth/runewidth_table.go @@ -124,8 +124,10 @@ var ambiguous = table{ {0x1F18F, 0x1F190}, {0x1F19B, 0x1F1AC}, {0xE0100, 0xE01EF}, {0xF0000, 0xFFFFD}, {0x100000, 0x10FFFD}, } -var notassigned = table{ - {0x27E6, 0x27ED}, {0x2985, 0x2986}, +var narrow = table{ + {0x0020, 0x007E}, {0x00A2, 0x00A3}, {0x00A5, 0x00A6}, + {0x00AC, 0x00AC}, {0x00AF, 0x00AF}, {0x27E6, 0x27ED}, + {0x2985, 0x2986}, } var neutral = table{ diff --git a/vendor/github.com/mitchellh/go-wordwrap/go.mod b/vendor/github.com/mitchellh/go-wordwrap/go.mod index 2ae411b2012..431d615d47a 100644 --- a/vendor/github.com/mitchellh/go-wordwrap/go.mod +++ b/vendor/github.com/mitchellh/go-wordwrap/go.mod @@ -1 +1,3 @@ module github.com/mitchellh/go-wordwrap + +go 1.14 diff --git a/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go index ac67205bc2e..f7bedda3882 100644 --- a/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go +++ b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go @@ -5,6 +5,8 @@ import ( "unicode" ) +const nbsp = 0xA0 + // WrapString wraps the given string within lim width in characters. // // Wrapping is currently naive and only happens at white-space. 
A future @@ -18,50 +20,58 @@ func WrapString(s string, lim uint) string { var current uint var wordBuf, spaceBuf bytes.Buffer + var wordBufLen, spaceBufLen uint for _, char := range s { if char == '\n' { if wordBuf.Len() == 0 { - if current+uint(spaceBuf.Len()) > lim { + if current+spaceBufLen > lim { current = 0 } else { - current += uint(spaceBuf.Len()) + current += spaceBufLen spaceBuf.WriteTo(buf) } spaceBuf.Reset() + spaceBufLen = 0 } else { - current += uint(spaceBuf.Len() + wordBuf.Len()) + current += spaceBufLen + wordBufLen spaceBuf.WriteTo(buf) spaceBuf.Reset() + spaceBufLen = 0 wordBuf.WriteTo(buf) wordBuf.Reset() + wordBufLen = 0 } buf.WriteRune(char) current = 0 - } else if unicode.IsSpace(char) { + } else if unicode.IsSpace(char) && char != nbsp { if spaceBuf.Len() == 0 || wordBuf.Len() > 0 { - current += uint(spaceBuf.Len() + wordBuf.Len()) + current += spaceBufLen + wordBufLen spaceBuf.WriteTo(buf) spaceBuf.Reset() + spaceBufLen = 0 wordBuf.WriteTo(buf) wordBuf.Reset() + wordBufLen = 0 } spaceBuf.WriteRune(char) + spaceBufLen++ } else { - wordBuf.WriteRune(char) + wordBufLen++ - if current+uint(spaceBuf.Len()+wordBuf.Len()) > lim && uint(wordBuf.Len()) < lim { + if current+wordBufLen+spaceBufLen > lim && wordBufLen < lim { buf.WriteRune('\n') current = 0 spaceBuf.Reset() + spaceBufLen = 0 } } } if wordBuf.Len() == 0 { - if current+uint(spaceBuf.Len()) <= lim { + if current+spaceBufLen <= lim { spaceBuf.WriteTo(buf) } } else { diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo.go b/vendor/github.com/moby/sys/mountinfo/mountinfo.go index fe828c8f583..403a893310b 100644 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo.go +++ b/vendor/github.com/moby/sys/mountinfo/mountinfo.go @@ -29,35 +29,38 @@ type Info struct { // ID is a unique identifier of the mount (may be reused after umount). ID int - // Parent indicates the ID of the mount parent (or of self for the top of the - // mount tree). + // Parent is the ID of the parent mount (or of self for the root + // of this mount namespace's mount tree). Parent int - // Major indicates one half of the device ID which identifies the device class. - Major int + // Major and Minor are the major and the minor components of the Dev + // field of unix.Stat_t structure returned by unix.*Stat calls for + // files on this filesystem. + Major, Minor int - // Minor indicates one half of the device ID which identifies a specific - // instance of device. - Minor int - - // Root of the mount within the filesystem. + // Root is the pathname of the directory in the filesystem which forms + // the root of this mount. Root string - // Mountpoint indicates the mount point relative to the process's root. + // Mountpoint is the pathname of the mount point relative to the + // process's root directory. Mountpoint string - // Options represents mount-specific options. + // Options is a comma-separated list of mount options. Options string - // Optional represents optional fields. + // Optional are zero or more fields of the form "tag[:value]", + // separated by a space. Currently, the possible optional fields are + // "shared", "master", "propagate_from", and "unbindable". For more + // information, see mount_namespaces(7) Linux man page. Optional string - // FSType indicates the type of filesystem, such as EXT3. + // FSType is the filesystem type in the form "type[.subtype]". FSType string - // Source indicates filesystem specific information or "none". + // Source is filesystem-specific information, or "none". 
Source string - // VFSOptions represents per super block options. + // VFSOptions is a comma-separated list of superblock options. VFSOptions string } diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_filters.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_filters.go index 5869b2cee39..16079c3c541 100644 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo_filters.go +++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_filters.go @@ -14,11 +14,16 @@ import "strings" // stop: true if parsing should be stopped after the entry. type FilterFunc func(*Info) (skip, stop bool) -// PrefixFilter discards all entries whose mount points -// do not start with a specific prefix. +// PrefixFilter discards all entries whose mount points do not start with, or +// are equal to the path specified in prefix. The prefix path must be absolute, +// have all symlinks resolved, and cleaned (i.e. no extra slashes or dots). +// +// PrefixFilter treats prefix as a path, not a partial prefix, which means that +// given "/foo", "/foo/bar" and "/foobar" entries, PrefixFilter("/foo") returns +// "/foo" and "/foo/bar", and discards "/foobar". func PrefixFilter(prefix string) FilterFunc { return func(m *Info) (bool, bool) { - skip := !strings.HasPrefix(m.Mountpoint, prefix) + skip := !strings.HasPrefix(m.Mountpoint+"/", prefix+"/") return skip, false } } diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go index e591c836538..f09a70fa0d9 100644 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go +++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go @@ -12,7 +12,8 @@ import ( // GetMountsFromReader retrieves a list of mounts from the // reader provided, with an optional filter applied (use nil // for no filter). This can be useful in tests or benchmarks -// that provide a fake mountinfo data. +// that provide fake mountinfo data, or when a source other +// than /proc/self/mountinfo needs to be read from. // // This function is Linux-specific. 
func GetMountsFromReader(r io.Reader, filter FilterFunc) ([]*Info, error) { @@ -133,8 +134,6 @@ func GetMountsFromReader(r io.Reader, filter FilterFunc) ([]*Info, error) { return out, nil } -// Parse /proc/self/mountinfo because comparing Dev and ino does not work from -// bind mounts func parseMountTable(filter FilterFunc) ([]*Info, error) { f, err := os.Open("/proc/self/mountinfo") if err != nil { diff --git a/vendor/github.com/moby/term/.gitignore b/vendor/github.com/moby/term/.gitignore index df63b129a79..b0747ff010a 100644 --- a/vendor/github.com/moby/term/.gitignore +++ b/vendor/github.com/moby/term/.gitignore @@ -1,24 +1,8 @@ -# Docker project generated files to ignore -# if you want to ignore files created by your editor/tools, -# please consider a global .gitignore https://help.github.com/articles/ignoring-files -*.exe -*.exe~ -*.gz -*.orig -test.main -.*.swp -.DS_Store -# a .bashrc may be added to customize the build environment -.bashrc -.editorconfig -.gopath/ -.go-pkg-cache/ -.idea/ -autogen/ -bundles/ -cmd/dockerd/dockerd -contrib/builder/rpm/*/changelog -vendor/pkg/ -go-test-report.json +# if you want to ignore files created by your editor/tools, consider using a +# global .gitignore or .git/info/exclude see https://help.github.com/articles/ignoring-files +.* +!.github +!.gitignore profile.out -junit-report.xml +# support running go modules in vendor mode for local development +vendor/ diff --git a/vendor/github.com/moby/term/README.md b/vendor/github.com/moby/term/README.md new file mode 100644 index 00000000000..0ce92cc3398 --- /dev/null +++ b/vendor/github.com/moby/term/README.md @@ -0,0 +1,36 @@ +# term - utilities for dealing with terminals + +![Test](https://github.com/moby/term/workflows/Test/badge.svg) [![GoDoc](https://godoc.org/github.com/moby/term?status.svg)](https://godoc.org/github.com/moby/term) [![Go Report Card](https://goreportcard.com/badge/github.com/moby/term)](https://goreportcard.com/report/github.com/moby/term) + +term provides structures and helper functions to work with terminal (state, sizes). + +#### Using term + +```go +package main + +import ( + "log" + "os" + + "github.com/moby/term" +) + +func main() { + fd := os.Stdin.Fd() + if term.IsTerminal(fd) { + ws, err := term.GetWinsize(fd) + if err != nil { + log.Fatalf("term.GetWinsize: %s", err) + } + log.Printf("%d:%d\n", ws.Height, ws.Width) + } +} +``` + +## Contributing + +Want to hack on term? [Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md) apply. + +## Copyright and license +Code and documentation copyright 2015 Docker, inc. Code released under the Apache 2.0 license. Docs released under Creative commons. 
diff --git a/vendor/github.com/moby/term/ascii.go b/vendor/github.com/moby/term/ascii.go index 7c445686f3b..55873c0556c 100644 --- a/vendor/github.com/moby/term/ascii.go +++ b/vendor/github.com/moby/term/ascii.go @@ -1,4 +1,4 @@ -package term // import "github.com/moby/term" +package term import ( "fmt" diff --git a/vendor/github.com/moby/term/go.mod b/vendor/github.com/moby/term/go.mod index 9f23ce0d5e4..f453204333a 100644 --- a/vendor/github.com/moby/term/go.mod +++ b/vendor/github.com/moby/term/go.mod @@ -4,10 +4,9 @@ go 1.13 require ( github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 - github.com/google/go-cmp v0.3.1 + github.com/creack/pty v1.1.11 + github.com/google/go-cmp v0.4.0 github.com/pkg/errors v0.9.1 // indirect - github.com/sirupsen/logrus v1.4.2 - golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 - gotest.tools v2.2.0+incompatible - gotest.tools/v3 v3.0.2 // indirect + golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a + gotest.tools/v3 v3.0.2 ) diff --git a/vendor/github.com/moby/term/go.sum b/vendor/github.com/moby/term/go.sum index 413bf366683..441e06137ac 100644 --- a/vendor/github.com/moby/term/go.sum +++ b/vendor/github.com/moby/term/go.sum @@ -1,33 +1,23 @@ github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a h1:i47hUS795cOydZI4AwJQCKXOr4BvxzvikwDoDtHhP2Y= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= diff --git a/vendor/github.com/moby/term/proxy.go b/vendor/github.com/moby/term/proxy.go index 9573222f9fa..c47756b89a9 100644 --- a/vendor/github.com/moby/term/proxy.go +++ b/vendor/github.com/moby/term/proxy.go @@ -1,4 +1,4 @@ -package term // import "github.com/moby/term" +package term import ( "io" @@ -19,6 +19,7 @@ type escapeProxy struct { escapeKeys []byte escapeKeyPos int r io.Reader + buf []byte } // NewEscapeProxy returns a new TTY proxy reader which wraps the given reader @@ -31,48 +32,57 @@ func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader { } } -func (r *escapeProxy) Read(buf []byte) (int, error) { - nr, err := r.r.Read(buf) +func (r *escapeProxy) Read(buf []byte) (n int, err error) { + if len(r.escapeKeys) > 0 && r.escapeKeyPos == len(r.escapeKeys) { + return 0, EscapeError{} + } - if len(r.escapeKeys) == 0 { - return nr, err + if len(r.buf) > 0 { + n = copy(buf, r.buf) + r.buf = r.buf[n:] } - preserve := func() { - // this preserves the original key presses in the passed in buffer - nr += r.escapeKeyPos - preserve := make([]byte, 0, r.escapeKeyPos+len(buf)) - preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...) - preserve = append(preserve, buf...) - r.escapeKeyPos = 0 - copy(buf[0:nr], preserve) + nr, err := r.r.Read(buf[n:]) + n += nr + if len(r.escapeKeys) == 0 { + return n, err } - if nr != 1 || err != nil { - if r.escapeKeyPos > 0 { - preserve() + for i := 0; i < n; i++ { + if buf[i] == r.escapeKeys[r.escapeKeyPos] { + r.escapeKeyPos++ + + // Check if the full escape sequence is matched. 
+ if r.escapeKeyPos == len(r.escapeKeys) { + n = i + 1 - r.escapeKeyPos + if n < 0 { + n = 0 + } + return n, EscapeError{} + } + continue } - return nr, err - } - if buf[0] != r.escapeKeys[r.escapeKeyPos] { - if r.escapeKeyPos > 0 { - preserve() + // If we need to prepend a partial escape sequence from the previous + // read, make sure the new buffer size doesn't exceed len(buf). + // Otherwise, preserve any extra data in a buffer for the next read. + if i < r.escapeKeyPos { + preserve := make([]byte, 0, r.escapeKeyPos+n) + preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...) + preserve = append(preserve, buf[:n]...) + n = copy(buf, preserve) + i += r.escapeKeyPos + r.buf = append(r.buf, preserve[n:]...) } - return nr, nil + r.escapeKeyPos = 0 } - if r.escapeKeyPos == len(r.escapeKeys)-1 { - return 0, EscapeError{} + // If we're in the middle of reading an escape sequence, make sure we don't + // let the caller read it. If later on we find that this is not the escape + // sequence, we'll prepend it back to buf. + n -= r.escapeKeyPos + if n < 0 { + n = 0 } - - // Looks like we've got an escape key, but we need to match again on the next - // read. - // Store the current escape key we found so we can look for the next one on - // the next read. - // Since this is an escape key, make sure we don't let the caller read it - // If later on we find that this is not the escape sequence, we'll add the - // keys back - r.escapeKeyPos++ - return nr - r.escapeKeyPos, nil + return n, err } diff --git a/vendor/github.com/moby/term/tc.go b/vendor/github.com/moby/term/tc.go index 162dfb2b2f5..65556027a6d 100644 --- a/vendor/github.com/moby/term/tc.go +++ b/vendor/github.com/moby/term/tc.go @@ -1,20 +1,19 @@ // +build !windows -package term // import "github.com/moby/term" +package term import ( - "syscall" - "unsafe" - "golang.org/x/sys/unix" ) -func tcget(fd uintptr, p *Termios) syscall.Errno { - _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) - return err +func tcget(fd uintptr) (*Termios, error) { + p, err := unix.IoctlGetTermios(int(fd), getTermios) + if err != nil { + return nil, err + } + return p, nil } -func tcset(fd uintptr, p *Termios) syscall.Errno { - _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) - return err +func tcset(fd uintptr, p *Termios) error { + return unix.IoctlSetTermios(int(fd), setTermios, p) } diff --git a/vendor/github.com/moby/term/term.go b/vendor/github.com/moby/term/term.go index 6d643516f39..29c6acf1c7e 100644 --- a/vendor/github.com/moby/term/term.go +++ b/vendor/github.com/moby/term/term.go @@ -2,7 +2,7 @@ // Package term provides structures and helper functions to work with // terminal (state, sizes). -package term // import "github.com/moby/term" +package term import ( "errors" @@ -50,8 +50,8 @@ func GetFdInfo(in interface{}) (uintptr, bool) { // IsTerminal returns true if the given file descriptor is a terminal. func IsTerminal(fd uintptr) bool { - var termios Termios - return tcget(fd, &termios) == 0 + _, err := tcget(fd) + return err == nil } // RestoreTerminal restores the terminal connected to the given file descriptor @@ -60,20 +60,16 @@ func RestoreTerminal(fd uintptr, state *State) error { if state == nil { return ErrInvalidState } - if err := tcset(fd, &state.termios); err != 0 { - return err - } - return nil + return tcset(fd, &state.termios) } // SaveState saves the state of the terminal connected to the given file descriptor. 
func SaveState(fd uintptr) (*State, error) { - var oldState State - if err := tcget(fd, &oldState.termios); err != 0 { + termios, err := tcget(fd) + if err != nil { return nil, err } - - return &oldState, nil + return &State{termios: *termios}, nil } // DisableEcho applies the specified state to the terminal connected to the file @@ -82,7 +78,7 @@ func DisableEcho(fd uintptr, state *State) error { newState := state.termios newState.Lflag &^= unix.ECHO - if err := tcset(fd, &newState); err != 0 { + if err := tcset(fd, &newState); err != nil { return err } handleInterrupt(fd, state) diff --git a/vendor/github.com/moby/term/term_windows.go b/vendor/github.com/moby/term/term_windows.go index 1649c230647..ba82960d4a6 100644 --- a/vendor/github.com/moby/term/term_windows.go +++ b/vendor/github.com/moby/term/term_windows.go @@ -1,13 +1,12 @@ -package term // import "github.com/moby/term" +package term import ( "io" "os" "os/signal" - "syscall" // used for STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and STD_ERROR_HANDLE - "github.com/Azure/go-ansiterm/winterm" windowsconsole "github.com/moby/term/windows" + "golang.org/x/sys/windows" ) // State holds the console mode for the terminal. @@ -28,37 +27,42 @@ var vtInputSupported bool func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { // Turn on VT handling on all std handles, if possible. This might // fail, in which case we will fall back to terminal emulation. - var emulateStdin, emulateStdout, emulateStderr bool - fd := os.Stdin.Fd() - if mode, err := winterm.GetConsoleMode(fd); err == nil { + var ( + emulateStdin, emulateStdout, emulateStderr bool + + mode uint32 + ) + + fd := windows.Handle(os.Stdin.Fd()) + if err := windows.GetConsoleMode(fd, &mode); err == nil { // Validate that winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. - if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil { + if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil { emulateStdin = true } else { vtInputSupported = true } // Unconditionally set the console mode back even on failure because SetConsoleMode // remembers invalid bits on input handles. - winterm.SetConsoleMode(fd, mode) + _ = windows.SetConsoleMode(fd, mode) } - fd = os.Stdout.Fd() - if mode, err := winterm.GetConsoleMode(fd); err == nil { + fd = windows.Handle(os.Stdout.Fd()) + if err := windows.GetConsoleMode(fd, &mode); err == nil { // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. - if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil { + if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING|windows.DISABLE_NEWLINE_AUTO_RETURN); err != nil { emulateStdout = true } else { - winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING) + _ = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING) } } - fd = os.Stderr.Fd() - if mode, err := winterm.GetConsoleMode(fd); err == nil { + fd = windows.Handle(os.Stderr.Fd()) + if err := windows.GetConsoleMode(fd, &mode); err == nil { // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. 
- if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil { + if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING|windows.DISABLE_NEWLINE_AUTO_RETURN); err != nil { emulateStderr = true } else { - winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING) + _ = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING) } } @@ -67,19 +71,22 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { // go-ansiterm hasn't switch to x/sys/windows. // TODO: switch back to x/sys/windows once go-ansiterm has switched if emulateStdin { - stdIn = windowsconsole.NewAnsiReader(syscall.STD_INPUT_HANDLE) + h := uint32(windows.STD_INPUT_HANDLE) + stdIn = windowsconsole.NewAnsiReader(int(h)) } else { stdIn = os.Stdin } if emulateStdout { - stdOut = windowsconsole.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE) + h := uint32(windows.STD_OUTPUT_HANDLE) + stdOut = windowsconsole.NewAnsiWriter(int(h)) } else { stdOut = os.Stdout } if emulateStderr { - stdErr = windowsconsole.NewAnsiWriter(syscall.STD_ERROR_HANDLE) + h := uint32(windows.STD_ERROR_HANDLE) + stdErr = windowsconsole.NewAnsiWriter(int(h)) } else { stdErr = os.Stderr } @@ -94,8 +101,8 @@ func GetFdInfo(in interface{}) (uintptr, bool) { // GetWinsize returns the window size based on the specified file descriptor. func GetWinsize(fd uintptr) (*Winsize, error) { - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { + var info windows.ConsoleScreenBufferInfo + if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil { return nil, err } @@ -109,20 +116,23 @@ func GetWinsize(fd uintptr) (*Winsize, error) { // IsTerminal returns true if the given file descriptor is a terminal. func IsTerminal(fd uintptr) bool { - return windowsconsole.IsConsole(fd) + var mode uint32 + err := windows.GetConsoleMode(windows.Handle(fd), &mode) + return err == nil } // RestoreTerminal restores the terminal connected to the given file descriptor // to a previous state. func RestoreTerminal(fd uintptr, state *State) error { - return winterm.SetConsoleMode(fd, state.mode) + return windows.SetConsoleMode(windows.Handle(fd), state.mode) } // SaveState saves the state of the terminal connected to the given file descriptor. func SaveState(fd uintptr) (*State, error) { - mode, e := winterm.GetConsoleMode(fd) - if e != nil { - return nil, e + var mode uint32 + + if err := windows.GetConsoleMode(windows.Handle(fd), &mode); err != nil { + return nil, err } return &State{mode: mode}, nil @@ -132,9 +142,9 @@ func SaveState(fd uintptr) (*State, error) { // -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx func DisableEcho(fd uintptr, state *State) error { mode := state.mode - mode &^= winterm.ENABLE_ECHO_INPUT - mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT - err := winterm.SetConsoleMode(fd, mode) + mode &^= windows.ENABLE_ECHO_INPUT + mode |= windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT + err := windows.SetConsoleMode(windows.Handle(fd), mode) if err != nil { return err } @@ -169,7 +179,7 @@ func SetRawTerminalOutput(fd uintptr) (*State, error) { // Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this // version of Windows. 
- winterm.SetConsoleMode(fd, state.mode|winterm.DISABLE_NEWLINE_AUTO_RETURN) + _ = windows.SetConsoleMode(windows.Handle(fd), state.mode|windows.DISABLE_NEWLINE_AUTO_RETURN) return state, err } @@ -188,21 +198,21 @@ func MakeRaw(fd uintptr) (*State, error) { // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx // Disable these modes - mode &^= winterm.ENABLE_ECHO_INPUT - mode &^= winterm.ENABLE_LINE_INPUT - mode &^= winterm.ENABLE_MOUSE_INPUT - mode &^= winterm.ENABLE_WINDOW_INPUT - mode &^= winterm.ENABLE_PROCESSED_INPUT + mode &^= windows.ENABLE_ECHO_INPUT + mode &^= windows.ENABLE_LINE_INPUT + mode &^= windows.ENABLE_MOUSE_INPUT + mode &^= windows.ENABLE_WINDOW_INPUT + mode &^= windows.ENABLE_PROCESSED_INPUT // Enable these modes - mode |= winterm.ENABLE_EXTENDED_FLAGS - mode |= winterm.ENABLE_INSERT_MODE - mode |= winterm.ENABLE_QUICK_EDIT_MODE + mode |= windows.ENABLE_EXTENDED_FLAGS + mode |= windows.ENABLE_INSERT_MODE + mode |= windows.ENABLE_QUICK_EDIT_MODE if vtInputSupported { - mode |= winterm.ENABLE_VIRTUAL_TERMINAL_INPUT + mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT } - err = winterm.SetConsoleMode(fd, mode) + err = windows.SetConsoleMode(windows.Handle(fd), mode) if err != nil { return nil, err } @@ -215,7 +225,7 @@ func restoreAtInterrupt(fd uintptr, state *State) { go func() { _ = <-sigchan - RestoreTerminal(fd, state) + _ = RestoreTerminal(fd, state) os.Exit(0) }() } diff --git a/vendor/github.com/moby/term/termios_linux.go b/vendor/github.com/moby/term/termios.go similarity index 62% rename from vendor/github.com/moby/term/termios_linux.go rename to vendor/github.com/moby/term/termios.go index be39ff56cc0..0f028e2273a 100644 --- a/vendor/github.com/moby/term/termios_linux.go +++ b/vendor/github.com/moby/term/termios.go @@ -1,28 +1,24 @@ -package term // import "github.com/moby/term" +// +build !windows + +package term import ( "golang.org/x/sys/unix" ) -const ( - getTermios = unix.TCGETS - setTermios = unix.TCSETS -) - // Termios is the Unix API for terminal I/O. -type Termios unix.Termios +type Termios = unix.Termios -// MakeRaw put the terminal connected to the given file descriptor into raw +// MakeRaw puts the terminal connected to the given file descriptor into raw // mode and returns the previous state of the terminal so that it can be // restored. 
func MakeRaw(fd uintptr) (*State, error) { - termios, err := unix.IoctlGetTermios(int(fd), getTermios) + termios, err := tcget(fd) if err != nil { return nil, err } - var oldState State - oldState.termios = Termios(*termios) + oldState := State{termios: *termios} termios.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) termios.Oflag &^= unix.OPOST @@ -32,7 +28,7 @@ func MakeRaw(fd uintptr) (*State, error) { termios.Cc[unix.VMIN] = 1 termios.Cc[unix.VTIME] = 0 - if err := unix.IoctlSetTermios(int(fd), setTermios, termios); err != nil { + if err := tcset(fd, termios); err != nil { return nil, err } return &oldState, nil diff --git a/vendor/github.com/moby/term/termios_bsd.go b/vendor/github.com/moby/term/termios_bsd.go index da785106657..922dd4baab0 100644 --- a/vendor/github.com/moby/term/termios_bsd.go +++ b/vendor/github.com/moby/term/termios_bsd.go @@ -1,10 +1,8 @@ // +build darwin freebsd openbsd netbsd -package term // import "github.com/moby/term" +package term import ( - "unsafe" - "golang.org/x/sys/unix" ) @@ -12,31 +10,3 @@ const ( getTermios = unix.TIOCGETA setTermios = unix.TIOCSETA ) - -// Termios is the Unix API for terminal I/O. -type Termios unix.Termios - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) - newState.Oflag &^= unix.OPOST - newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) - newState.Cflag &^= (unix.CSIZE | unix.PARENB) - newState.Cflag |= unix.CS8 - newState.Cc[unix.VMIN] = 1 - newState.Cc[unix.VTIME] = 0 - - if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/vendor/github.com/moby/term/termios_nonbsd.go b/vendor/github.com/moby/term/termios_nonbsd.go new file mode 100644 index 00000000000..038fd61ba1e --- /dev/null +++ b/vendor/github.com/moby/term/termios_nonbsd.go @@ -0,0 +1,12 @@ +//+build !darwin,!freebsd,!netbsd,!openbsd,!windows + +package term + +import ( + "golang.org/x/sys/unix" +) + +const ( + getTermios = unix.TCGETS + setTermios = unix.TCSETS +) diff --git a/vendor/github.com/moby/term/windows/ansi_reader.go b/vendor/github.com/moby/term/windows/ansi_reader.go index 5114b63ebf1..155251521b0 100644 --- a/vendor/github.com/moby/term/windows/ansi_reader.go +++ b/vendor/github.com/moby/term/windows/ansi_reader.go @@ -1,6 +1,6 @@ // +build windows -package windowsconsole // import "github.com/moby/term/windows" +package windowsconsole import ( "bytes" @@ -31,7 +31,6 @@ type ansiReader struct { // NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a // Windows console input handle. 
func NewAnsiReader(nFile int) io.ReadCloser { - initLogger() file, fd := winterm.GetStdFile(nFile) return &ansiReader{ file: file, @@ -59,8 +58,6 @@ func (ar *ansiReader) Read(p []byte) (int, error) { // Previously read bytes exist, read as much as we can and return if len(ar.buffer) > 0 { - logger.Debugf("Reading previously cached bytes") - originalLength := len(ar.buffer) copiedLength := copy(p, ar.buffer) @@ -70,16 +67,14 @@ func (ar *ansiReader) Read(p []byte) (int, error) { ar.buffer = ar.buffer[copiedLength:] } - logger.Debugf("Read from cache p[%d]: % x", copiedLength, p) return copiedLength, nil } // Read and translate key events - events, err := readInputEvents(ar.fd, len(p)) + events, err := readInputEvents(ar, len(p)) if err != nil { return 0, err } else if len(events) == 0 { - logger.Debug("No input events detected") return 0, nil } @@ -87,11 +82,9 @@ func (ar *ansiReader) Read(p []byte) (int, error) { // Save excess bytes and right-size keyBytes if len(keyBytes) > len(p) { - logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p)) ar.buffer = keyBytes[len(p):] keyBytes = keyBytes[:len(p)] } else if len(keyBytes) == 0 { - logger.Debug("No key bytes returned from the translator") return 0, nil } @@ -100,13 +93,11 @@ func (ar *ansiReader) Read(p []byte) (int, error) { return 0, errors.New("unexpected copy length encountered") } - logger.Debugf("Read p[%d]: % x", copiedLength, p) - logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes) return copiedLength, nil } // readInputEvents polls until at least one event is available. -func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) { +func readInputEvents(ar *ansiReader, maxBytes int) ([]winterm.INPUT_RECORD, error) { // Determine the maximum number of records to retrieve // -- Cast around the type system to obtain the size of a single INPUT_RECORD. // unsafe.Sizeof requires an expression vs. a type-reference; the casting @@ -118,25 +109,23 @@ func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) { } else if countRecords == 0 { countRecords = 1 } - logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize) // Wait for and read input events events := make([]winterm.INPUT_RECORD, countRecords) nEvents := uint32(0) - eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE) + eventsExist, err := winterm.WaitForSingleObject(ar.fd, winterm.WAIT_INFINITE) if err != nil { return nil, err } if eventsExist { - err = winterm.ReadConsoleInput(fd, events, &nEvents) + err = winterm.ReadConsoleInput(ar.fd, events, &nEvents) if err != nil { return nil, err } } // Return a slice restricted to the number of returned records - logger.Debugf("[windows] readInputEvents: Read %v events", nEvents) return events[:nEvents], nil } diff --git a/vendor/github.com/moby/term/windows/ansi_writer.go b/vendor/github.com/moby/term/windows/ansi_writer.go index 51bf9aa48cf..ccb5ef07757 100644 --- a/vendor/github.com/moby/term/windows/ansi_writer.go +++ b/vendor/github.com/moby/term/windows/ansi_writer.go @@ -1,6 +1,6 @@ // +build windows -package windowsconsole // import "github.com/moby/term/windows" +package windowsconsole import ( "io" @@ -24,7 +24,6 @@ type ansiWriter struct { // NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a // Windows console output handle. 
func NewAnsiWriter(nFile int) io.Writer { - initLogger() file, fd := winterm.GetStdFile(nFile) info, err := winterm.GetConsoleScreenBufferInfo(fd) if err != nil { @@ -32,9 +31,8 @@ func NewAnsiWriter(nFile int) io.Writer { } parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) - logger.Infof("newAnsiWriter: parser %p", parser) - aw := &ansiWriter{ + return &ansiWriter{ file: file, fd: fd, infoReset: info, @@ -42,10 +40,6 @@ func NewAnsiWriter(nFile int) io.Writer { escapeSequence: []byte(ansiterm.KEY_ESC_CSI), parser: parser, } - - logger.Infof("newAnsiWriter: aw.parser %p", aw.parser) - logger.Infof("newAnsiWriter: %v", aw) - return aw } func (aw *ansiWriter) Fd() uintptr { @@ -58,7 +52,5 @@ func (aw *ansiWriter) Write(p []byte) (total int, err error) { return 0, nil } - logger.Infof("Write: % x", p) - logger.Infof("Write: %s", string(p)) return aw.parser.Parse(p) } diff --git a/vendor/github.com/moby/term/windows/console.go b/vendor/github.com/moby/term/windows/console.go index 54e0fe839fc..993694ddcd9 100644 --- a/vendor/github.com/moby/term/windows/console.go +++ b/vendor/github.com/moby/term/windows/console.go @@ -1,11 +1,11 @@ // +build windows -package windowsconsole // import "github.com/moby/term/windows" +package windowsconsole import ( "os" - "github.com/Azure/go-ansiterm/winterm" + "golang.org/x/sys/windows" ) // GetHandleInfo returns file descriptor and bool indicating whether the file is a console. @@ -22,14 +22,18 @@ func GetHandleInfo(in interface{}) (uintptr, bool) { if file, ok := in.(*os.File); ok { inFd = file.Fd() - isTerminal = IsConsole(inFd) + isTerminal = isConsole(inFd) } return inFd, isTerminal } // IsConsole returns true if the given file descriptor is a Windows Console. // The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. -func IsConsole(fd uintptr) bool { - _, e := winterm.GetConsoleMode(fd) - return e == nil +// Deprecated: use golang.org/x/sys/windows.GetConsoleMode() or golang.org/x/term.IsTerminal() +var IsConsole = isConsole + +func isConsole(fd uintptr) bool { + var mode uint32 + err := windows.GetConsoleMode(windows.Handle(fd), &mode) + return err == nil } diff --git a/vendor/github.com/moby/term/windows/doc.go b/vendor/github.com/moby/term/windows/doc.go new file mode 100644 index 00000000000..54265fffaff --- /dev/null +++ b/vendor/github.com/moby/term/windows/doc.go @@ -0,0 +1,5 @@ +// These files implement ANSI-aware input and output streams for use by the Docker Windows client. +// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create +// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. + +package windowsconsole diff --git a/vendor/github.com/moby/term/windows/windows.go b/vendor/github.com/moby/term/windows/windows.go deleted file mode 100644 index bb03e060181..00000000000 --- a/vendor/github.com/moby/term/windows/windows.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build windows -// These files implement ANSI-aware input and output streams for use by the Docker Windows client. -// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create -// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. 
- -package windowsconsole // import "github.com/moby/term/windows" - -import ( - "io/ioutil" - "os" - "sync" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/sirupsen/logrus" -) - -var logger *logrus.Logger -var initOnce sync.Once - -func initLogger() { - initOnce.Do(func() { - logFile := ioutil.Discard - - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("ansiReaderWriter.log") - } - - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.DebugLevel, - } - }) -} diff --git a/vendor/github.com/moby/term/winsize.go b/vendor/github.com/moby/term/winsize.go index 29a0463dc46..1ef98d59961 100644 --- a/vendor/github.com/moby/term/winsize.go +++ b/vendor/github.com/moby/term/winsize.go @@ -1,6 +1,6 @@ // +build !windows -package term // import "github.com/moby/term" +package term import ( "golang.org/x/sys/unix" diff --git a/vendor/github.com/onsi/ginkgo/.travis.yml b/vendor/github.com/onsi/ginkgo/.travis.yml index 8b2883f976f..ea0966d5bd6 100644 --- a/vendor/github.com/onsi/ginkgo/.travis.yml +++ b/vendor/github.com/onsi/ginkgo/.travis.yml @@ -1,8 +1,8 @@ language: go go: - - 1.14.x - - 1.15.x - tip + - 1.16.x + - 1.15.x cache: directories: diff --git a/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/CHANGELOG.md index bf51fe9cd29..4e0afc291bb 100644 --- a/vendor/github.com/onsi/ginkgo/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/CHANGELOG.md @@ -1,3 +1,31 @@ +## 1.16.1 + +### Fixes +- Supress --stream deprecation warning on windows (#793) + +## 1.16.0 + +### Features +- Advertise Ginkgo 2.0. Introduce deprecations. [9ef1913] + - Update README.md to advertise that Ginkgo 2.0 is coming. + - Backport the 2.0 DeprecationTracker and start alerting users + about upcoming deprecations. + +- Add slim-sprig template functions to bootstrap/generate (#775) [9162b86] + +### Fixes +- Fix accidental reference to 1488 (#784) [9fb7fe4] + +## 1.15.2 + +### Fixes +- ignore blank `-focus` and `-skip` flags (#780) [e90a4a0] + +## 1.15.1 + +### Fixes +- reporters/junit: Use `system-out` element instead of `passed` (#769) [9eda305] + ## 1.15.0 ### Features diff --git a/vendor/github.com/onsi/ginkgo/README.md b/vendor/github.com/onsi/ginkgo/README.md index 64e85eee0b8..05321e6eafc 100644 --- a/vendor/github.com/onsi/ginkgo/README.md +++ b/vendor/github.com/onsi/ginkgo/README.md @@ -1,11 +1,24 @@ ![Ginkgo: A Go BDD Testing Framework](https://onsi.github.io/ginkgo/images/ginkgo.png) [![Build Status](https://travis-ci.org/onsi/ginkgo.svg?branch=master)](https://travis-ci.org/onsi/ginkgo) +[![test](https://github.com/onsi/ginkgo/workflows/test/badge.svg?branch=master)](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster) Jump to the [docs](https://onsi.github.io/ginkgo/) | [中文文档](https://ke-chain.github.io/ginkgodoc) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)! If you have a question, comment, bug report, feature request, etc. please open a GitHub issue, or visit the [Ginkgo Slack channel](https://app.slack.com/client/T029RQSE6/CQQ50BBNW). +# Ginkgo 2.0 is coming soon! + +An effort is underway to develop and deliver Ginkgo 2.0. The work is happening in the [v2](https://github.com/onsi/ginkgo/tree/v2) branch and a changelog and migration guide is being maintained on that branch [here](https://github.com/onsi/ginkgo/blob/v2/docs/MIGRATING_TO_V2.md). 
Issue [#711](https://github.com/onsi/ginkgo/issues/711) is the central place for discussion and links to the original [proposal doc](https://docs.google.com/document/d/1h28ZknXRsTLPNNiOjdHIO-F2toCzq4xoZDXbfYaBdoQ/edit#). + +As described in the [changelog](https://github.com/onsi/ginkgo/blob/v2/docs/MIGRATING_TO_V2.md) and [proposal](https://docs.google.com/document/d/1h28ZknXRsTLPNNiOjdHIO-F2toCzq4xoZDXbfYaBdoQ/edit#), Ginkgo 2.0 will clean up the Ginkgo codebase, deprecate and remove some v1 functionality, and add several new much-requested features. To help users get ready for the migration, Ginkgo v1 has started emitting deprecation warnings for features that will no longer be supported with links to documentation for how to migrate away from these features. If you have concerns or comments please chime in on [#711](https://github.com/onsi/ginkgo/issues/711). + +The current timeline for completion of 2.0 looks like: + +- Early April 2021: first public release of 2.0, deprecation warnings land in v1. +- May 2021: first beta/rc of 2.0 with most new functionality in place. +- June/July 2021: 2.0 ships and fully replaces the 1.x codebase on master. + ## TLDR Ginkgo builds on Go's `testing` package, allowing expressive [Behavior-Driven Development](https://en.wikipedia.org/wiki/Behavior-driven_development) ("BDD") style tests. It is typically (and optionally) paired with the [Gomega](https://github.com/onsi/gomega) matcher library. @@ -61,6 +74,8 @@ Describe("the strings package", func() { - [Completions for VSCode](https://github.com/onsi/vscode-ginkgo): just use VSCode's extension installer to install `vscode-ginkgo`. +- [Ginkgo tools for VSCode](https://marketplace.visualstudio.com/items?itemName=joselitofilho.ginkgotestexplorer): just use VSCode's extension installer to install `ginkgoTestExplorer`. + - Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](https://onsi.github.io/ginkgo/#third-party-integrations) for details. - A modular architecture that lets you easily: diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go index 8c177811e94..5f4a4c26eda 100644 --- a/vendor/github.com/onsi/ginkgo/config/config.go +++ b/vendor/github.com/onsi/ginkgo/config/config.go @@ -20,7 +20,7 @@ import ( "fmt" ) -const VERSION = "1.15.0" +const VERSION = "1.16.1" type GinkgoConfigType struct { RandomSeed int64 @@ -219,10 +219,14 @@ func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultRepor // flagFocus implements the -focus flag. func flagFocus(arg string) { - GinkgoConfig.FocusStrings = append(GinkgoConfig.FocusStrings, arg) + if arg != "" { + GinkgoConfig.FocusStrings = append(GinkgoConfig.FocusStrings, arg) + } } // flagSkip implements the -skip flag. 
func flagSkip(arg string) { - GinkgoConfig.SkipStrings = append(GinkgoConfig.SkipStrings, arg) + if arg != "" { + GinkgoConfig.SkipStrings = append(GinkgoConfig.SkipStrings, arg) + } } diff --git a/vendor/github.com/onsi/ginkgo/formatter/formatter.go b/vendor/github.com/onsi/ginkgo/formatter/formatter.go new file mode 100644 index 00000000000..30d7cbe1297 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/formatter/formatter.go @@ -0,0 +1,190 @@ +package formatter + +import ( + "fmt" + "regexp" + "strings" +) + +const COLS = 80 + +type ColorMode uint8 + +const ( + ColorModeNone ColorMode = iota + ColorModeTerminal + ColorModePassthrough +) + +var SingletonFormatter = New(ColorModeTerminal) + +func F(format string, args ...interface{}) string { + return SingletonFormatter.F(format, args...) +} + +func Fi(indentation uint, format string, args ...interface{}) string { + return SingletonFormatter.Fi(indentation, format, args...) +} + +func Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string { + return SingletonFormatter.Fiw(indentation, maxWidth, format, args...) +} + +type Formatter struct { + ColorMode ColorMode + colors map[string]string + styleRe *regexp.Regexp + preserveColorStylingTags bool +} + +func NewWithNoColorBool(noColor bool) Formatter { + if noColor { + return New(ColorModeNone) + } + return New(ColorModeTerminal) +} + +func New(colorMode ColorMode) Formatter { + f := Formatter{ + ColorMode: colorMode, + colors: map[string]string{ + "/": "\x1b[0m", + "bold": "\x1b[1m", + "underline": "\x1b[4m", + + "red": "\x1b[38;5;9m", + "orange": "\x1b[38;5;214m", + "coral": "\x1b[38;5;204m", + "magenta": "\x1b[38;5;13m", + "green": "\x1b[38;5;10m", + "dark-green": "\x1b[38;5;28m", + "yellow": "\x1b[38;5;11m", + "light-yellow": "\x1b[38;5;228m", + "cyan": "\x1b[38;5;14m", + "gray": "\x1b[38;5;243m", + "light-gray": "\x1b[38;5;246m", + "blue": "\x1b[38;5;12m", + }, + } + colors := []string{} + for color := range f.colors { + colors = append(colors, color) + } + f.styleRe = regexp.MustCompile("{{(" + strings.Join(colors, "|") + ")}}") + return f +} + +func (f Formatter) F(format string, args ...interface{}) string { + return f.Fi(0, format, args...) +} + +func (f Formatter) Fi(indentation uint, format string, args ...interface{}) string { + return f.Fiw(indentation, 0, format, args...) +} + +func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string { + out := fmt.Sprintf(f.style(format), args...) 
+ + if indentation == 0 && maxWidth == 0 { + return out + } + + lines := strings.Split(out, "\n") + + if maxWidth != 0 { + outLines := []string{} + + maxWidth = maxWidth - indentation*2 + for _, line := range lines { + if f.length(line) <= maxWidth { + outLines = append(outLines, line) + continue + } + outWords := []string{} + length := uint(0) + words := strings.Split(line, " ") + for _, word := range words { + wordLength := f.length(word) + if length+wordLength <= maxWidth { + length += wordLength + outWords = append(outWords, word) + continue + } + outLines = append(outLines, strings.Join(outWords, " ")) + outWords = []string{word} + length = wordLength + } + if len(outWords) > 0 { + outLines = append(outLines, strings.Join(outWords, " ")) + } + } + + lines = outLines + } + + if indentation == 0 { + return strings.Join(lines, "\n") + } + + padding := strings.Repeat(" ", int(indentation)) + for i := range lines { + if lines[i] != "" { + lines[i] = padding + lines[i] + } + } + + return strings.Join(lines, "\n") +} + +func (f Formatter) length(styled string) uint { + n := uint(0) + inStyle := false + for _, b := range styled { + if inStyle { + if b == 'm' { + inStyle = false + } + continue + } + if b == '\x1b' { + inStyle = true + continue + } + n += 1 + } + return n +} + +func (f Formatter) CycleJoin(elements []string, joiner string, cycle []string) string { + if len(elements) == 0 { + return "" + } + n := len(cycle) + out := "" + for i, text := range elements { + out += cycle[i%n] + text + if i < len(elements)-1 { + out += joiner + } + } + out += "{{/}}" + return f.style(out) +} + +func (f Formatter) style(s string) string { + switch f.ColorMode { + case ColorModeNone: + return f.styleRe.ReplaceAllString(s, "") + case ColorModePassthrough: + return s + case ColorModeTerminal: + return f.styleRe.ReplaceAllStringFunc(s, func(match string) string { + if out, ok := f.colors[strings.Trim(match, "{}")]; ok { + return out + } + return match + }) + } + + return "" +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go index 7e8a487082d..998c2c2caf3 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go +++ b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go @@ -17,6 +17,7 @@ import ( "io" "net/http" "os" + "reflect" "strings" "time" @@ -32,6 +33,8 @@ import ( "github.com/onsi/ginkgo/types" ) +var deprecationTracker = types.NewDeprecationTracker() + const GINKGO_VERSION = config.VERSION const GINKGO_PANIC = ` Your test failed. @@ -205,21 +208,27 @@ func RunSpecs(t GinkgoTestingT, description string) bool { if config.DefaultReporterConfig.ReportFile != "" { reportFile := config.DefaultReporterConfig.ReportFile specReporters[0] = reporters.NewJUnitReporter(reportFile) - return RunSpecsWithDefaultAndCustomReporters(t, description, specReporters) + specReporters = append(specReporters, buildDefaultReporter()) } - return RunSpecsWithCustomReporters(t, description, specReporters) + return runSpecsWithCustomReporters(t, description, specReporters) } //To run your tests with Ginkgo's default reporter and your custom reporter(s), replace //RunSpecs() with this method. 
func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool { + deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter()) specReporters = append(specReporters, buildDefaultReporter()) - return RunSpecsWithCustomReporters(t, description, specReporters) + return runSpecsWithCustomReporters(t, description, specReporters) } //To run your tests with your custom reporter(s) (and *not* Ginkgo's default reporter), replace //RunSpecs() with this method. Note that parallel tests will not work correctly without the default reporter func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool { + deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter()) + return runSpecsWithCustomReporters(t, description, specReporters) +} + +func runSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool { writer := GinkgoWriter.(*writer.Writer) writer.SetStream(config.DefaultReporterConfig.Verbose) reporters := make([]reporters.Reporter, len(specReporters)) @@ -227,6 +236,11 @@ func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specRepor reporters[i] = reporter } passed, hasFocusedTests := global.Suite.Run(t, description, reporters, writer, config.GinkgoConfig) + + if deprecationTracker.DidTrackDeprecations() { + fmt.Fprintln(colorable.NewColorableStderr(), deprecationTracker.DeprecationsReport()) + } + if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" { fmt.Println("PASS | FOCUSED") os.Exit(types.GINKGO_FOCUS_EXIT_CODE) @@ -380,12 +394,14 @@ func XWhen(text string, body func()) bool { //Ginkgo will normally run It blocks synchronously. To perform asynchronous tests, pass a //function that accepts a Done channel. When you do this, you can also provide an optional timeout. func It(text string, body interface{}, timeout ...float64) bool { + validateBodyFunc(body, codelocation.New(1)) global.Suite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...)) return true } //You can focus individual Its using FIt func FIt(text string, body interface{}, timeout ...float64) bool { + validateBodyFunc(body, codelocation.New(1)) global.Suite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...)) return true } @@ -406,12 +422,14 @@ func XIt(text string, _ ...interface{}) bool { //which "It" does not fit into a natural sentence flow. All the same protocols apply for Specify blocks //which apply to It blocks. func Specify(text string, body interface{}, timeout ...float64) bool { + validateBodyFunc(body, codelocation.New(1)) global.Suite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...)) return true } //You can focus individual Specifys using FSpecify func FSpecify(text string, body interface{}, timeout ...float64) bool { + validateBodyFunc(body, codelocation.New(1)) global.Suite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...)) return true } @@ -484,6 +502,7 @@ func XMeasure(text string, _ ...interface{}) bool { // //You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level. 
func BeforeSuite(body interface{}, timeout ...float64) bool { + validateBodyFunc(body, codelocation.New(1)) global.Suite.SetBeforeSuiteNode(body, codelocation.New(1), parseTimeout(timeout...)) return true } @@ -497,6 +516,7 @@ func BeforeSuite(body interface{}, timeout ...float64) bool { // //You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level. func AfterSuite(body interface{}, timeout ...float64) bool { + validateBodyFunc(body, codelocation.New(1)) global.Suite.SetAfterSuiteNode(body, codelocation.New(1), parseTimeout(timeout...)) return true } @@ -584,6 +604,7 @@ func SynchronizedAfterSuite(allNodesBody interface{}, node1Body interface{}, tim //Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts //a Done channel func BeforeEach(body interface{}, timeout ...float64) bool { + validateBodyFunc(body, codelocation.New(1)) global.Suite.PushBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...)) return true } @@ -594,6 +615,7 @@ func BeforeEach(body interface{}, timeout ...float64) bool { //Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts //a Done channel func JustBeforeEach(body interface{}, timeout ...float64) bool { + validateBodyFunc(body, codelocation.New(1)) global.Suite.PushJustBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...)) return true } @@ -604,6 +626,7 @@ func JustBeforeEach(body interface{}, timeout ...float64) bool { //Like It blocks, JustAfterEach blocks can be made asynchronous by providing a body function that accepts //a Done channel func JustAfterEach(body interface{}, timeout ...float64) bool { + validateBodyFunc(body, codelocation.New(1)) global.Suite.PushJustAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...)) return true } @@ -614,10 +637,30 @@ func JustAfterEach(body interface{}, timeout ...float64) bool { //Like It blocks, AfterEach blocks can be made asynchronous by providing a body function that accepts //a Done channel func AfterEach(body interface{}, timeout ...float64) bool { + validateBodyFunc(body, codelocation.New(1)) global.Suite.PushAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...)) return true } +func validateBodyFunc(body interface{}, cl types.CodeLocation) { + t := reflect.TypeOf(body) + if t.Kind() != reflect.Func { + return + } + + if t.NumOut() > 0 { + return + } + + if t.NumIn() == 0 { + return + } + + if t.In(0) == reflect.TypeOf(make(Done)) { + deprecationTracker.TrackDeprecation(types.Deprecations.Async(), cl) + } +} + func parseTimeout(timeout ...float64) time.Duration { if len(timeout) == 0 { return global.DefaultTimeout diff --git a/vendor/github.com/onsi/ginkgo/go.mod b/vendor/github.com/onsi/ginkgo/go.mod index 655060cf743..664372fb665 100644 --- a/vendor/github.com/onsi/ginkgo/go.mod +++ b/vendor/github.com/onsi/ginkgo/go.mod @@ -1,11 +1,11 @@ module github.com/onsi/ginkgo +go 1.15 + require ( - github.com/fsnotify/fsnotify v1.4.9 // indirect - github.com/nxadm/tail v1.4.4 + github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 + github.com/nxadm/tail v1.4.8 github.com/onsi/gomega v1.10.1 golang.org/x/sys v0.0.0-20210112080510-489259a85091 golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e ) - -go 1.13 diff --git a/vendor/github.com/onsi/ginkgo/go.sum b/vendor/github.com/onsi/ginkgo/go.sum index 56a493f9dac..5c5c3c50204 100644 --- a/vendor/github.com/onsi/ginkgo/go.sum +++ 
b/vendor/github.com/onsi/ginkgo/go.sum @@ -1,6 +1,11 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= @@ -14,13 +19,19 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -69,6 +80,7 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 
h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go index 963caaaff1b..01ddca6e1df 100644 --- a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go +++ b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go @@ -33,17 +33,12 @@ type JUnitTestSuite struct { type JUnitTestCase struct { Name string `xml:"name,attr"` ClassName string `xml:"classname,attr"` - PassedMessage *JUnitPassedMessage `xml:"passed,omitempty"` FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"` Skipped *JUnitSkipped `xml:"skipped,omitempty"` Time float64 `xml:"time,attr"` SystemOut string `xml:"system-out,omitempty"` } -type JUnitPassedMessage struct { - Message string `xml:",chardata"` -} - type JUnitFailureMessage struct { Type string `xml:"type,attr"` Message string `xml:",chardata"` @@ -114,9 +109,7 @@ func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) { ClassName: reporter.testSuiteName, } if reporter.ReporterConfig.ReportPassed && specSummary.State == types.SpecStatePassed { - testCase.PassedMessage = &JUnitPassedMessage{ - Message: specSummary.CapturedOutput, - } + testCase.SystemOut = specSummary.CapturedOutput } if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked { testCase.FailureMessage = &JUnitFailureMessage{ diff --git a/vendor/github.com/onsi/ginkgo/types/deprecation_support.go b/vendor/github.com/onsi/ginkgo/types/deprecation_support.go new file mode 100644 index 00000000000..7f7a9aeb886 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/types/deprecation_support.go @@ -0,0 +1,96 @@ +package types + +import ( + "github.com/onsi/ginkgo/formatter" +) + +type Deprecation struct { + Message string + DocLink string +} + +type deprecations struct{} + +var Deprecations = deprecations{} + +func (d deprecations) CustomReporter() Deprecation { + return Deprecation{ + Message: "You are using a custom reporter. Support for custom reporters will likely be removed in V2. Most users were using them to generate junit or teamcity reports and this functionality will be merged into the core reporter. In addition, Ginkgo 2.0 will support emitting a JSON-formatted report that users can then manipulate to generate custom reports.\n\n{{red}}{{bold}}If this change will be impactful to you please leave a comment on {{cyan}}{{underline}}https://github.com/onsi/ginkgo/issues/711{{/}}", + DocLink: "removed-custom-reporters", + } +} + +func (d deprecations) V1Reporter() Deprecation { + return Deprecation{ + Message: "You are using a V1 Ginkgo Reporter. Please update your custom reporter to the new V2 Reporter interface.", + DocLink: "changed-reporter-interface", + } +} + +func (d deprecations) Async() Deprecation { + return Deprecation{ + Message: "You are passing a Done channel to a test node to test asynchronous behavior. This is deprecated in Ginkgo V2. 
Your test will run synchronously and the timeout will be ignored.", + DocLink: "removed-async-testing", + } +} + +func (d deprecations) Measure() Deprecation { + return Deprecation{ + Message: "Measure is deprecated in Ginkgo V2", + DocLink: "removed-measure", + } +} + +func (d deprecations) Convert() Deprecation { + return Deprecation{ + Message: "The convert command is deprecated in Ginkgo V2", + DocLink: "removed-ginkgo-convert", + } +} + +func (d deprecations) Blur() Deprecation { + return Deprecation{ + Message: "The blur command is deprecated in Ginkgo V2. Use 'ginkgo unfocus' instead.", + } +} + +type DeprecationTracker struct { + deprecations map[Deprecation][]CodeLocation +} + +func NewDeprecationTracker() *DeprecationTracker { + return &DeprecationTracker{ + deprecations: map[Deprecation][]CodeLocation{}, + } +} + +func (d *DeprecationTracker) TrackDeprecation(deprecation Deprecation, cl ...CodeLocation) { + if len(cl) == 1 { + d.deprecations[deprecation] = append(d.deprecations[deprecation], cl[0]) + } else { + d.deprecations[deprecation] = []CodeLocation{} + } +} + +func (d *DeprecationTracker) DidTrackDeprecations() bool { + return len(d.deprecations) > 0 +} + +func (d *DeprecationTracker) DeprecationsReport() string { + out := formatter.F("{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n") + out += formatter.F("{{light-yellow}}============================================={{/}}\n") + out += formatter.F("Ginkgo 2.0 is under active development and will introduce (a small number of) breaking changes.\n") + out += formatter.F("To learn more, view the migration guide at {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/v2/docs/MIGRATING_TO_V2.md{{/}}\n") + out += formatter.F("To comment, chime in at {{cyan}}{{underline}}https://github.com/onsi/ginkgo/issues/711{{/}}\n\n") + + for deprecation, locations := range d.deprecations { + out += formatter.Fi(1, "{{yellow}}"+deprecation.Message+"{{/}}\n") + if deprecation.DocLink != "" { + out += formatter.Fi(1, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/v2/docs/MIGRATING_TO_V2.md#%s{{/}}\n", deprecation.DocLink) + } + for _, location := range locations { + out += formatter.Fi(2, "{{gray}}%s{{/}}\n", location) + } + } + return out +} diff --git a/vendor/github.com/onsi/gomega/.travis.yml b/vendor/github.com/onsi/gomega/.travis.yml index 348e3014c6f..6543dc5539e 100644 --- a/vendor/github.com/onsi/gomega/.travis.yml +++ b/vendor/github.com/onsi/gomega/.travis.yml @@ -1,20 +1,18 @@ language: go arch: - - amd64 - - ppc64le + - amd64 + - ppc64le go: - - 1.14.x - - 1.15.x - gotip + - 1.16.x + - 1.15.x env: - GO111MODULE=on -install: - - go get -v ./... - - go build ./... - - go get github.com/onsi/ginkgo - - go install github.com/onsi/ginkgo/ginkgo +install: skip -script: make test +script: + - go mod tidy && git diff --exit-code go.mod go.sum + - make test diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 16095fa3c26..33bc7550926 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,16 @@ +## 1.11.0 + +### Features +- feature: add index to gstruct element func (#419) [334e00d] +- feat(gexec) Add CompileTest functions. 
Close #410 (#411) [47c613f] + +### Fixes +- Check more carefully for nils in WithTransform (#423) [3c60a15] +- fix: typo in Makefile [b82522a] +- Allow WithTransform function to accept a nil value (#422) [b75d2f2] +- fix: print value type for interface{} containers (#409) [f08e2dc] +- fix(BeElementOf): consistently flatten expected values [1fa9468] + ## 1.10.5 ### Fixes diff --git a/vendor/github.com/onsi/gomega/Dockerfile b/vendor/github.com/onsi/gomega/Dockerfile new file mode 100644 index 00000000000..11c7e63e753 --- /dev/null +++ b/vendor/github.com/onsi/gomega/Dockerfile @@ -0,0 +1 @@ +FROM golang:1.15 diff --git a/vendor/github.com/onsi/gomega/Makefile b/vendor/github.com/onsi/gomega/Makefile index c92cd56e3ad..1c6d107e175 100644 --- a/vendor/github.com/onsi/gomega/Makefile +++ b/vendor/github.com/onsi/gomega/Makefile @@ -1,6 +1,33 @@ -test: - [ -z "`gofmt -s -w -l -e .`" ] - go vet - ginkgo -p -r --randomizeAllSpecs --failOnPending --randomizeSuites --race +###### Help ################################################################### -.PHONY: test +.DEFAULT_GOAL = help + +.PHONY: help + +help: ## list Makefile targets + @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' + +###### Targets ################################################################ + +test: version download fmt vet ginkgo ## Runs all build, static analysis, and test steps + +download: ## Download dependencies + go mod download + +vet: ## Run static code analysis + go vet ./... + +ginkgo: ## Run tests using Ginkgo + go run github.com/onsi/ginkgo/ginkgo -p -r --randomizeAllSpecs --failOnPending --randomizeSuites --race + +fmt: ## Checks that the code is formatted correctly + @@if [ -n "$$(gofmt -s -e -l -d .)" ]; then \ + echo "gofmt check failed: run 'gofmt -s -e -l -w .'"; \ + exit 1; \ + fi + +docker_test: ## Run tests in a container via docker-compose + docker-compose build test && docker-compose run --rm test make test + +version: ## Display the version of Go + @@go version diff --git a/vendor/github.com/onsi/gomega/docker-compose.yaml b/vendor/github.com/onsi/gomega/docker-compose.yaml new file mode 100644 index 00000000000..f37496143d5 --- /dev/null +++ b/vendor/github.com/onsi/gomega/docker-compose.yaml @@ -0,0 +1,10 @@ +version: '3.0' + +services: + test: + build: + dockerfile: Dockerfile + context: . + working_dir: /app + volumes: + - ${PWD}:/app diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go index e59d7d75b61..4f7462ab180 100644 --- a/vendor/github.com/onsi/gomega/format/format.go +++ b/vendor/github.com/onsi/gomega/format/format.go @@ -7,6 +7,7 @@ Gomega's format package pretty-prints objects. It explores input objects recurs package format import ( + "context" "fmt" "reflect" "strconv" @@ -44,16 +45,7 @@ var TruncateThreshold uint = 50 // after the first diff location in a truncated string assertion error message.
var CharactersAroundMismatchToInclude uint = 5 -// Ctx interface defined here to keep backwards compatibility with go < 1.7 -// It matches the context.Context interface -type Ctx interface { - Deadline() (deadline time.Time, ok bool) - Done() <-chan struct{} - Err() error - Value(key interface{}) interface{} -} - -var contextType = reflect.TypeOf((*Ctx)(nil)).Elem() +var contextType = reflect.TypeOf((*context.Context)(nil)).Elem() var timeType = reflect.TypeOf(time.Time{}) //The default indentation string emitted by the format package @@ -181,7 +173,7 @@ Set PrintContextObjects to true to print the content of objects implementing con func Object(object interface{}, indentation uint) string { indent := strings.Repeat(Indent, int(indentation)) value := reflect.ValueOf(object) - return fmt.Sprintf("%s<%s>: %s", indent, formatType(object), formatValue(value, indentation)) + return fmt.Sprintf("%s<%s>: %s", indent, formatType(value), formatValue(value, indentation)) } /* @@ -201,25 +193,20 @@ func IndentString(s string, indentation uint) string { return result } -func formatType(object interface{}) string { - t := reflect.TypeOf(object) - if t == nil { +func formatType(v reflect.Value) string { + switch v.Kind() { + case reflect.Invalid: return "nil" - } - switch t.Kind() { case reflect.Chan: - v := reflect.ValueOf(object) - return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap()) + return fmt.Sprintf("%s | len:%d, cap:%d", v.Type(), v.Len(), v.Cap()) case reflect.Ptr: - return fmt.Sprintf("%T | %p", object, object) + return fmt.Sprintf("%s | 0x%x", v.Type(), v.Pointer()) case reflect.Slice: - v := reflect.ValueOf(object) - return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap()) + return fmt.Sprintf("%s | len:%d, cap:%d", v.Type(), v.Len(), v.Cap()) case reflect.Map: - v := reflect.ValueOf(object) - return fmt.Sprintf("%T | len:%d", object, v.Len()) + return fmt.Sprintf("%s | len:%d", v.Type(), v.Len()) default: - return fmt.Sprintf("%T", object) + return fmt.Sprintf("%s", v.Type()) } } @@ -284,7 +271,7 @@ func formatValue(value reflect.Value, indentation uint) string { } return formatStruct(value, indentation) case reflect.Interface: - return formatValue(value.Elem(), indentation) + return formatInterface(value, indentation) default: if value.CanInterface() { return fmt.Sprintf("%#v", value.Interface()) @@ -379,6 +366,10 @@ func formatStruct(v reflect.Value, indentation uint) string { return fmt.Sprintf("{%s}", strings.Join(result, ", ")) } +func formatInterface(v reflect.Value, indentation uint) string { + return fmt.Sprintf("<%s>%s", formatType(v.Elem()), formatValue(v.Elem(), indentation)) +} + func isNilValue(a reflect.Value) bool { switch a.Kind() { case reflect.Invalid: diff --git a/vendor/github.com/onsi/gomega/go.mod b/vendor/github.com/onsi/gomega/go.mod index 6f853a5797b..8731e6ac20e 100644 --- a/vendor/github.com/onsi/gomega/go.mod +++ b/vendor/github.com/onsi/gomega/go.mod @@ -3,8 +3,8 @@ module github.com/onsi/gomega go 1.14 require ( - github.com/golang/protobuf v1.4.2 + github.com/golang/protobuf v1.4.3 github.com/onsi/ginkgo v1.12.1 golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb - gopkg.in/yaml.v2 v2.3.0 + gopkg.in/yaml.v2 v2.4.0 ) diff --git a/vendor/github.com/onsi/gomega/go.sum b/vendor/github.com/onsi/gomega/go.sum index 54eeacd2ba4..df224db954b 100644 --- a/vendor/github.com/onsi/gomega/go.sum +++ b/vendor/github.com/onsi/gomega/go.sum @@ -1,46 +1,38 @@ github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= 
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -56,11 +48,9 @@ google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyz google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 1bc5288b8b2..41bcda74c05 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -24,7 +24,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.10.5" +const GOMEGA_VERSION = "1.11.0" const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil. If you're using Ginkgo then you probably forgot to put your assertion in an It(). 
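Editor's note, not part of the vendored diff: the Gomega 1.11.0 changelog above lists "Allow WithTransform function to accept a nil value", and the with_transform.go hunk that follows implements it by substituting the zero value of the transform's interface-typed argument when the actual value is nil. A minimal sketch of a test exercising that behavior is shown below; the package, test, and helper names are hypothetical and assume only the public Gomega API (NewGomegaWithT, Expect, WithTransform, Equal).

package example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

// describeErr takes an interface-typed argument (error), so with the vendored
// change a nil actual value is passed through as a nil error instead of being
// rejected before the transform runs.
func describeErr(err error) string {
	if err == nil {
		return "no error"
	}
	return err.Error()
}

func TestWithTransformNilActual(t *testing.T) {
	g := NewGomegaWithT(t)

	var err error // deliberately nil
	g.Expect(err).To(WithTransform(describeErr, Equal("no error")))
}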
diff --git a/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go index 1f9d7a8e620..9ee75a5d511 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go @@ -18,23 +18,9 @@ func (matcher *BeElementOfMatcher) Match(actual interface{}) (success bool, err return false, fmt.Errorf("BeElement matcher expects actual to be typed") } - length := len(matcher.Elements) - valueAt := func(i int) interface{} { - return matcher.Elements[i] - } - // Special handling of a single element of type Array or Slice - if length == 1 && isArrayOrSlice(valueAt(0)) { - element := valueAt(0) - value := reflect.ValueOf(element) - length = value.Len() - valueAt = func(i int) interface{} { - return value.Index(i).Interface() - } - } - var lastError error - for i := 0; i < length; i++ { - matcher := &EqualMatcher{Expected: valueAt(i)} + for _, m := range flatten(matcher.Elements) { + matcher := &EqualMatcher{Expected: m} success, err := matcher.Match(actual) if err != nil { lastError = err @@ -49,9 +35,9 @@ func (matcher *BeElementOfMatcher) Match(actual interface{}) (success bool, err } func (matcher *BeElementOfMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, "to be an element of", matcher.Elements) + return format.Message(actual, "to be an element of", presentable(matcher.Elements)) } func (matcher *BeElementOfMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to be an element of", matcher.Elements) + return format.Message(actual, "not to be an element of", presentable(matcher.Elements)) } diff --git a/vendor/github.com/onsi/gomega/matchers/with_transform.go b/vendor/github.com/onsi/gomega/matchers/with_transform.go index 8e58d8a0fb7..f3dec910104 100644 --- a/vendor/github.com/onsi/gomega/matchers/with_transform.go +++ b/vendor/github.com/onsi/gomega/matchers/with_transform.go @@ -40,15 +40,24 @@ func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) } func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) { - // return error if actual's type is incompatible with Transform function's argument type - actualType := reflect.TypeOf(actual) - if !actualType.AssignableTo(m.transformArgType) { - return false, fmt.Errorf("Transform function expects '%s' but we have '%s'", m.transformArgType, actualType) + // prepare a parameter to pass to the Transform function + var param reflect.Value + if actual != nil && reflect.TypeOf(actual).AssignableTo(m.transformArgType) { + // The dynamic type of actual is compatible with the transform argument. + param = reflect.ValueOf(actual) + + } else if actual == nil && m.transformArgType.Kind() == reflect.Interface { + // The dynamic type of actual is unknown, so there's no way to make its + // reflect.Value. Create a nil of the transform argument, which is known. 
+ param = reflect.Zero(m.transformArgType) + + } else { + return false, fmt.Errorf("Transform function expects '%s' but we have '%T'", m.transformArgType, actual) } // call the Transform function with `actual` fn := reflect.ValueOf(m.Transform) - result := fn.Call([]reflect.Value{reflect.ValueOf(actual)}) + result := fn.Call([]reflect.Value{param}) m.transformedValue = result[0].Interface() // expect exactly one value return m.Matcher.Match(m.transformedValue) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_windows.go index 65cd40e9287..f19333e61eb 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_windows.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_windows.go @@ -3,8 +3,8 @@ package user import ( - "fmt" "os/user" + "strconv" ) func lookupUser(username string) (User, error) { @@ -16,7 +16,7 @@ func lookupUser(username string) (User, error) { } func lookupUid(uid int) (User, error) { - u, err := user.LookupId(fmt.Sprintf("%d", uid)) + u, err := user.LookupId(strconv.Itoa(uid)) if err != nil { return User{}, err } @@ -32,7 +32,7 @@ func lookupGroup(groupname string) (Group, error) { } func lookupGid(gid int) (Group, error) { - g, err := user.LookupGroupId(fmt.Sprintf("%d", gid)) + g, err := user.LookupGroupId(strconv.Itoa(gid)) if err != nil { return Group{}, err } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go index 4b89dad7376..a533bf5e668 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go @@ -466,7 +466,7 @@ func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, err // we asked for a group but didn't find it. let's check to see // if we wanted a numeric group if !found { - gid, err := strconv.Atoi(ag) + gid, err := strconv.ParseInt(ag, 10, 64) if err != nil { return nil, fmt.Errorf("Unable to find group %s", ag) } @@ -474,7 +474,7 @@ func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, err if gid < minId || gid > maxId { return nil, ErrRange } - gidMap[gid] = struct{}{} + gidMap[int(gid)] = struct{}{} } } gids := []int{} diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go index 7b60f8bb3b2..5fceeb63533 100644 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go @@ -60,7 +60,7 @@ type Process struct { SelinuxLabel string `json:"selinuxLabel,omitempty" platform:"linux"` } -// LinuxCapabilities specifies the whitelist of capabilities that are kept for a process. +// LinuxCapabilities specifies the list of allowed capabilities that are kept for a process. // http://man7.org/linux/man-pages/man7/capabilities.7.html type LinuxCapabilities struct { // Bounding is the set of capabilities checked by the kernel. @@ -90,7 +90,7 @@ type User struct { // GID is the group id. GID uint32 `json:"gid" platform:"linux,solaris"` // Umask is the umask for the init process. - Umask uint32 `json:"umask,omitempty" platform:"linux,solaris"` + Umask *uint32 `json:"umask,omitempty" platform:"linux,solaris"` // AdditionalGids are additional group ids set for the container's process. 
AdditionalGids []uint32 `json:"additionalGids,omitempty" platform:"linux,solaris"` // Username is the user name. @@ -354,7 +354,7 @@ type LinuxRdma struct { // LinuxResources has container runtime resource constraints type LinuxResources struct { - // Devices configures the device whitelist. + // Devices configures the device allowlist. Devices []LinuxDeviceCgroup `json:"devices,omitempty"` // Memory restriction configuration Memory *LinuxMemory `json:"memory,omitempty"` @@ -372,6 +372,8 @@ type LinuxResources struct { // Limits are a set of key value pairs that define RDMA resource limits, // where the key is device name and value is resource limits. Rdma map[string]LinuxRdma `json:"rdma,omitempty"` + // Unified resources. + Unified map[string]string `json:"unified,omitempty"` } // LinuxDevice represents the mknod information for a Linux special device file @@ -392,7 +394,8 @@ type LinuxDevice struct { GID *uint32 `json:"gid,omitempty"` } -// LinuxDeviceCgroup represents a device rule for the whitelist controller +// LinuxDeviceCgroup represents a device rule for the devices specified to +// the device controller type LinuxDeviceCgroup struct { // Allow or deny Allow bool `json:"allow"` @@ -628,6 +631,7 @@ const ( ArchS390X Arch = "SCMP_ARCH_S390X" ArchPARISC Arch = "SCMP_ARCH_PARISC" ArchPARISC64 Arch = "SCMP_ARCH_PARISC64" + ArchRISCV64 Arch = "SCMP_ARCH_RISCV64" ) // LinuxSeccompAction taken upon Seccomp rule match @@ -635,12 +639,13 @@ type LinuxSeccompAction string // Define actions for Seccomp rules const ( - ActKill LinuxSeccompAction = "SCMP_ACT_KILL" - ActTrap LinuxSeccompAction = "SCMP_ACT_TRAP" - ActErrno LinuxSeccompAction = "SCMP_ACT_ERRNO" - ActTrace LinuxSeccompAction = "SCMP_ACT_TRACE" - ActAllow LinuxSeccompAction = "SCMP_ACT_ALLOW" - ActLog LinuxSeccompAction = "SCMP_ACT_LOG" + ActKill LinuxSeccompAction = "SCMP_ACT_KILL" + ActKillProcess LinuxSeccompAction = "SCMP_ACT_KILL_PROCESS" + ActTrap LinuxSeccompAction = "SCMP_ACT_TRAP" + ActErrno LinuxSeccompAction = "SCMP_ACT_ERRNO" + ActTrace LinuxSeccompAction = "SCMP_ACT_TRACE" + ActAllow LinuxSeccompAction = "SCMP_ACT_ALLOW" + ActLog LinuxSeccompAction = "SCMP_ACT_LOG" ) // LinuxSeccompOperator used to match syscall arguments in Seccomp diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go index 89dce34be20..e2e64c66311 100644 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go @@ -1,5 +1,23 @@ package specs +// ContainerState represents the state of a container. +type ContainerState string + +const ( + // StateCreating indicates that the container is being created + StateCreating ContainerState = "creating" + + // StateCreated indicates that the runtime has finished the create operation + StateCreated ContainerState = "created" + + // StateRunning indicates that the container process has executed the + // user-specified program but has not exited + StateRunning ContainerState = "running" + + // StateStopped indicates that the container process has exited + StateStopped ContainerState = "stopped" +) + // State holds information about the runtime state of the container. type State struct { // Version is the version of the specification that is supported. @@ -7,7 +25,7 @@ type State struct { // ID is the container ID ID string `json:"id"` // Status is the runtime status of the container. 
- Status string `json:"status"` + Status ContainerState `json:"status"` // Pid is the process ID for the container process. Pid int `json:"pid,omitempty"` // Bundle is the path to the container's bundle directory. diff --git a/vendor/github.com/openshift/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1/baremetalmachineproviderspec_types.go b/vendor/github.com/openshift/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1/baremetalmachineproviderspec_types.go index c54ce442910..134d43a3e66 100644 --- a/vendor/github.com/openshift/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1/baremetalmachineproviderspec_types.go +++ b/vendor/github.com/openshift/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1/baremetalmachineproviderspec_types.go @@ -29,7 +29,6 @@ import ( // BareMetalMachineProviderSpec holds data that the actuator needs to provision // and manage a Machine. -// +k8s:openapi-gen=true type BareMetalMachineProviderSpec struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` diff --git a/vendor/github.com/openshift/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1/baremetalmachineproviderstatus_types.go b/vendor/github.com/openshift/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1/baremetalmachineproviderstatus_types.go index 122ba5f7b55..d1e6e296425 100644 --- a/vendor/github.com/openshift/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1/baremetalmachineproviderstatus_types.go +++ b/vendor/github.com/openshift/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1/baremetalmachineproviderstatus_types.go @@ -24,7 +24,6 @@ import ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // BareMetalMachineProviderStatus is the Schema for the baremetalmachineproviderstatuses API -// +k8s:openapi-gen=true type BareMetalMachineProviderStatus struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machine_webhook.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machine_webhook.go index 5959a0ebb26..1cc0998d92c 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machine_webhook.go +++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machine_webhook.go @@ -128,10 +128,32 @@ func getInfra() (*osconfigv1.Infrastructure, error) { return infra, nil } -type machineAdmissionFn func(m *Machine, clusterID string) (bool, []string, utilerrors.Aggregate) +func getDNS() (*osconfigv1.DNS, error) { + cfg, err := ctrl.GetConfig() + if err != nil { + return nil, err + } + client, err := osclientset.NewForConfig(cfg) + if err != nil { + return nil, err + } + dns, err := client.ConfigV1().DNSes().Get(context.Background(), "cluster", metav1.GetOptions{}) + if err != nil { + return nil, err + } + + return dns, nil +} + +type machineAdmissionFn func(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) + +type admissionConfig struct { + clusterID string + dnsDisconnected bool +} type admissionHandler struct { - clusterID string + *admissionConfig webhookOperations machineAdmissionFn decoder *admission.Decoder } @@ -163,14 +185,23 @@ func NewMachineValidator() (*machineValidatorHandler, error) { return nil, err } - return createMachineValidator(infra.Status.PlatformStatus.Type, infra.Status.InfrastructureName), nil + dns, err := getDNS() + if err != nil { + return nil, err + 
} + + return createMachineValidator(infra, dns), nil } -func createMachineValidator(platform osconfigv1.PlatformType, clusterID string) *machineValidatorHandler { +func createMachineValidator(infra *osconfigv1.Infrastructure, dns *osconfigv1.DNS) *machineValidatorHandler { + admissionConfig := &admissionConfig{ + dnsDisconnected: dns.Spec.PublicZone == nil, + clusterID: infra.Status.InfrastructureName, + } return &machineValidatorHandler{ admissionHandler: &admissionHandler{ - clusterID: clusterID, - webhookOperations: getMachineValidatorOperation(platform), + admissionConfig: admissionConfig, + webhookOperations: getMachineValidatorOperation(infra.Status.PlatformStatus.Type), }, } } @@ -187,7 +218,7 @@ func getMachineValidatorOperation(platform osconfigv1.PlatformType) machineAdmis return validateVSphere default: // just no-op - return func(m *Machine, clusterID string) (bool, []string, utilerrors.Aggregate) { + return func(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { return true, []string{}, nil } } @@ -206,7 +237,7 @@ func NewMachineDefaulter() (*machineDefaulterHandler, error) { func createMachineDefaulter(platformStatus *osconfigv1.PlatformStatus, clusterID string) *machineDefaulterHandler { return &machineDefaulterHandler{ admissionHandler: &admissionHandler{ - clusterID: clusterID, + admissionConfig: &admissionConfig{clusterID: clusterID}, webhookOperations: getMachineDefaulterOperation(platformStatus), }, } @@ -228,7 +259,7 @@ func getMachineDefaulterOperation(platformStatus *osconfigv1.PlatformStatus) mac return defaultVSphere default: // just no-op - return func(m *Machine, clusterID string) (bool, []string, utilerrors.Aggregate) { + return func(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { return true, []string{}, nil } } @@ -417,7 +448,7 @@ func (h *machineValidatorHandler) Handle(ctx context.Context, req admission.Requ klog.V(3).Infof("Validate webhook called for Machine: %s", m.GetName()) - ok, warnings, errs := h.webhookOperations(m, h.clusterID) + ok, warnings, errs := h.webhookOperations(m, h.admissionConfig) if !ok { return responseWithWarnings(admission.Denied(errs.Error()), warnings) } @@ -446,7 +477,7 @@ func (h *machineDefaulterHandler) Handle(ctx context.Context, req admission.Requ m.Labels[MachineClusterIDLabel] = h.clusterID } - ok, warnings, errs := h.webhookOperations(m, h.clusterID) + ok, warnings, errs := h.webhookOperations(m, h.admissionConfig) if !ok { return responseWithWarnings(admission.Denied(errs.Error()), warnings) } @@ -462,7 +493,7 @@ type awsDefaulter struct { region string } -func (a awsDefaulter) defaultAWS(m *Machine, clusterID string) (bool, []string, utilerrors.Aggregate) { +func (a awsDefaulter) defaultAWS(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { klog.V(3).Infof("Defaulting AWS providerSpec") var errs []error @@ -513,7 +544,7 @@ func unmarshalInto(m *Machine, providerSpec interface{}) error { return nil } -func validateAWS(m *Machine, clusterID string) (bool, []string, utilerrors.Aggregate) { +func validateAWS(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { klog.V(3).Infof("Validating AWS providerSpec") var errs []error @@ -590,7 +621,7 @@ func validateAWS(m *Machine, clusterID string) (bool, []string, utilerrors.Aggre return true, warnings, nil } -func defaultAzure(m *Machine, clusterID string) (bool, []string, utilerrors.Aggregate) { +func defaultAzure(m *Machine, config *admissionConfig) (bool, []string, 
utilerrors.Aggregate) { klog.V(3).Infof("Defaulting Azure providerSpec") var errs []error @@ -607,12 +638,12 @@ func defaultAzure(m *Machine, clusterID string) (bool, []string, utilerrors.Aggr // Vnet and Subnet need to be provided together by the user if providerSpec.Vnet == "" && providerSpec.Subnet == "" { - providerSpec.Vnet = defaultAzureVnet(clusterID) - providerSpec.Subnet = defaultAzureSubnet(clusterID) + providerSpec.Vnet = defaultAzureVnet(config.clusterID) + providerSpec.Subnet = defaultAzureSubnet(config.clusterID) } if providerSpec.Image == (azure.Image{}) { - providerSpec.Image.ResourceID = defaultAzureImageResourceID(clusterID) + providerSpec.Image.ResourceID = defaultAzureImageResourceID(config.clusterID) } if providerSpec.UserDataSecret == nil { @@ -645,7 +676,7 @@ func defaultAzure(m *Machine, clusterID string) (bool, []string, utilerrors.Aggr return true, warnings, nil } -func validateAzure(m *Machine, clusterID string) (bool, []string, utilerrors.Aggregate) { +func validateAzure(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { klog.V(3).Infof("Validating Azure providerSpec") var errs []error @@ -660,6 +691,9 @@ func validateAzure(m *Machine, clusterID string) (bool, []string, utilerrors.Agg errs = append(errs, field.Required(field.NewPath("providerSpec", "vmSize"), "vmSize should be set to one of the supported Azure VM sizes")) } + if providerSpec.PublicIP && config.dnsDisconnected { + errs = append(errs, field.Forbidden(field.NewPath("providerSpec", "publicIP"), "publicIP is not allowed in Azure disconnected installation")) + } // Vnet requires Subnet if providerSpec.Vnet != "" && providerSpec.Subnet == "" { errs = append(errs, field.Required(field.NewPath("providerSpec", "subnet"), "must provide a subnet when a virtual network is specified")) @@ -729,7 +763,7 @@ func validateAzureImage(image azure.Image) []error { return errors } -func defaultGCP(m *Machine, clusterID string) (bool, []string, utilerrors.Aggregate) { +func defaultGCP(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { klog.V(3).Infof("Defaulting GCP providerSpec") var errs []error @@ -746,15 +780,15 @@ func defaultGCP(m *Machine, clusterID string) (bool, []string, utilerrors.Aggreg if len(providerSpec.NetworkInterfaces) == 0 { providerSpec.NetworkInterfaces = append(providerSpec.NetworkInterfaces, &gcp.GCPNetworkInterface{ - Network: defaultGCPNetwork(clusterID), - Subnetwork: defaultGCPSubnetwork(clusterID), + Network: defaultGCPNetwork(config.clusterID), + Subnetwork: defaultGCPSubnetwork(config.clusterID), }) } - providerSpec.Disks = defaultGCPDisks(providerSpec.Disks, clusterID) + providerSpec.Disks = defaultGCPDisks(providerSpec.Disks, config.clusterID) if len(providerSpec.Tags) == 0 { - providerSpec.Tags = defaultGCPTags(clusterID) + providerSpec.Tags = defaultGCPTags(config.clusterID) } if providerSpec.UserDataSecret == nil { @@ -804,7 +838,7 @@ func defaultGCPDisks(disks []*gcp.GCPDisk, clusterID string) []*gcp.GCPDisk { return disks } -func validateGCP(m *Machine, clusterID string) (bool, []string, utilerrors.Aggregate) { +func validateGCP(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { klog.V(3).Infof("Validating GCP providerSpec") var errs []error @@ -927,7 +961,7 @@ func validateGCPServiceAccounts(serviceAccounts []gcp.GCPServiceAccount, parentP return errs } -func defaultVSphere(m *Machine, clusterID string) (bool, []string, utilerrors.Aggregate) { +func defaultVSphere(m *Machine, config *admissionConfig) 
(bool, []string, utilerrors.Aggregate) { klog.V(3).Infof("Defaulting vSphere providerSpec") var errs []error @@ -959,7 +993,7 @@ func defaultVSphere(m *Machine, clusterID string) (bool, []string, utilerrors.Ag return true, warnings, nil } -func validateVSphere(m *Machine, clusterID string) (bool, []string, utilerrors.Aggregate) { +func validateVSphere(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { klog.V(3).Infof("Validating vSphere providerSpec") var errs []error diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machineset_webhook.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machineset_webhook.go index e4693e76c23..549e7ed5427 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machineset_webhook.go +++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machineset_webhook.go @@ -32,14 +32,23 @@ func NewMachineSetValidator() (*machineSetValidatorHandler, error) { return nil, err } - return createMachineSetValidator(infra.Status.PlatformStatus.Type, infra.Status.InfrastructureName), nil + dns, err := getDNS() + if err != nil { + return nil, err + } + + return createMachineSetValidator(infra, dns), nil } -func createMachineSetValidator(platform osconfigv1.PlatformType, clusterID string) *machineSetValidatorHandler { +func createMachineSetValidator(infra *osconfigv1.Infrastructure, dns *osconfigv1.DNS) *machineSetValidatorHandler { + admissionConfig := &admissionConfig{ + dnsDisconnected: dns.Spec.PublicZone == nil, + clusterID: infra.Status.InfrastructureName, + } return &machineSetValidatorHandler{ admissionHandler: &admissionHandler{ - clusterID: clusterID, - webhookOperations: getMachineValidatorOperation(platform), + admissionConfig: admissionConfig, + webhookOperations: getMachineValidatorOperation(infra.Status.PlatformStatus.Type), }, } } @@ -57,7 +66,7 @@ func NewMachineSetDefaulter() (*machineSetDefaulterHandler, error) { func createMachineSetDefaulter(platformStatus *osconfigv1.PlatformStatus, clusterID string) *machineSetDefaulterHandler { return &machineSetDefaulterHandler{ admissionHandler: &admissionHandler{ - clusterID: clusterID, + admissionConfig: &admissionConfig{clusterID: clusterID}, webhookOperations: getMachineDefaulterOperation(platformStatus), }, } @@ -108,7 +117,7 @@ func (h *machineSetValidatorHandler) validateMachineSet(ms *MachineSet) (bool, [ // Create a Machine from the MachineSet and validate the Machine template m := &Machine{Spec: ms.Spec.Template.Spec} - ok, warnings, err := h.webhookOperations(m, h.clusterID) + ok, warnings, err := h.webhookOperations(m, h.admissionConfig) if !ok { errs = append(errs, err.Errors()...) 
} @@ -122,7 +131,7 @@ func (h *machineSetValidatorHandler) validateMachineSet(ms *MachineSet) (bool, [ func (h *machineSetDefaulterHandler) defaultMachineSet(ms *MachineSet) (bool, []string, utilerrors.Aggregate) { // Create a Machine from the MachineSet and default the Machine template m := &Machine{Spec: ms.Spec.Template.Spec} - ok, warnings, err := h.webhookOperations(m, h.clusterID) + ok, warnings, err := h.webhookOperations(m, h.admissionConfig) if !ok { return false, warnings, utilerrors.NewAggregate(err.Errors()) } diff --git a/vendor/github.com/pquerna/cachecontrol/.travis.yml b/vendor/github.com/pquerna/cachecontrol/.travis.yml index e22bd31faec..0d966bb01cb 100644 --- a/vendor/github.com/pquerna/cachecontrol/.travis.yml +++ b/vendor/github.com/pquerna/cachecontrol/.travis.yml @@ -4,5 +4,5 @@ arch: language: go go: - - "1.14" - "1.15" + - "1.16" diff --git a/vendor/github.com/pquerna/cachecontrol/cacheobject/object.go b/vendor/github.com/pquerna/cachecontrol/cacheobject/object.go index bfc44c62cb1..ae38a317dbd 100644 --- a/vendor/github.com/pquerna/cachecontrol/cacheobject/object.go +++ b/vendor/github.com/pquerna/cachecontrol/cacheobject/object.go @@ -55,33 +55,17 @@ type ObjectResults struct { OutErr error } -// LOW LEVEL API: Check if a object is cachable. -func CachableObject(obj *Object, rv *ObjectResults) { - rv.OutReasons = nil - rv.OutWarnings = nil - rv.OutErr = nil - +// LOW LEVEL API: Check if a request is cacheable. +// This function doesn't reset the passed ObjectResults. +func CachableRequestObject(obj *Object, rv *ObjectResults) { switch obj.ReqMethod { case "GET": break case "HEAD": break case "POST": - /** - POST: http://tools.ietf.org/html/rfc7231#section-4.3.3 - - Responses to POST requests are only cacheable when they include - explicit freshness information (see Section 4.2.1 of [RFC7234]). - However, POST caching is not widely implemented. For cases where an - origin server wishes the client to be able to cache the result of a - POST in a way that can be reused by a later GET, the origin server - MAY send a 200 (OK) response containing the result and a - Content-Location header field that has the same value as the POST's - effective request URI (Section 3.1.4.2). - */ - if !hasFreshness(obj.ReqDirectives, obj.RespDirectives, obj.RespHeaders, obj.RespExpiresHeader, obj.CacheIsPrivate) { - rv.OutReasons = append(rv.OutReasons, ReasonRequestMethodPOST) - } + // Responses to POST requests can be cacheable if they include explicit freshness information + break case "PUT": rv.OutReasons = append(rv.OutReasons, ReasonRequestMethodPUT) @@ -109,10 +93,29 @@ func CachableObject(obj *Object, rv *ObjectResults) { if obj.ReqDirectives != nil && obj.ReqDirectives.NoStore { rv.OutReasons = append(rv.OutReasons, ReasonRequestNoStore) } +} + +// LOW LEVEL API: Check if a response is cacheable. +// This function doesn't reset the passed ObjectResults. +func CachableResponseObject(obj *Object, rv *ObjectResults) { + /** + POST: http://tools.ietf.org/html/rfc7231#section-4.3.3 + + Responses to POST requests are only cacheable when they include + explicit freshness information (see Section 4.2.1 of [RFC7234]). + However, POST caching is not widely implemented. 
For cases where an + origin server wishes the client to be able to cache the result of a + POST in a way that can be reused by a later GET, the origin server + MAY send a 200 (OK) response containing the result and a + Content-Location header field that has the same value as the POST's + effective request URI (Section 3.1.4.2). + */ + if obj.ReqMethod == http.MethodPost && !hasFreshness(obj.RespDirectives, obj.RespHeaders, obj.RespExpiresHeader, obj.CacheIsPrivate) { + rv.OutReasons = append(rv.OutReasons, ReasonRequestMethodPOST) + } // Storing Responses to Authenticated Requests: http://tools.ietf.org/html/rfc7234#section-3.2 - authz := obj.ReqHeaders.Get("Authorization") - if authz != "" { + if obj.ReqHeaders.Get("Authorization") != "" { if obj.RespDirectives.MustRevalidate || obj.RespDirectives.Public || obj.RespDirectives.SMaxAge != -1 { @@ -149,18 +152,26 @@ func CachableObject(obj *Object, rv *ObjectResults) { * contains a public response directive (see Section 5.2.2.5). */ - expires := obj.RespHeaders.Get("Expires") != "" - statusCachable := cachableStatusCode(obj.RespStatusCode) - - if expires || + if obj.RespHeaders.Get("Expires") != "" || obj.RespDirectives.MaxAge != -1 || (obj.RespDirectives.SMaxAge != -1 && !obj.CacheIsPrivate) || - statusCachable || + cachableStatusCode(obj.RespStatusCode) || obj.RespDirectives.Public { /* cachable by default, at least one of the above conditions was true */ - } else { - rv.OutReasons = append(rv.OutReasons, ReasonResponseUncachableByDefault) + return } + + rv.OutReasons = append(rv.OutReasons, ReasonResponseUncachableByDefault) +} + +// LOW LEVEL API: Check if a object is cachable. +func CachableObject(obj *Object, rv *ObjectResults) { + rv.OutReasons = nil + rv.OutWarnings = nil + rv.OutErr = nil + + CachableRequestObject(obj, rv) + CachableResponseObject(obj, rv) } var twentyFourHours = time.Duration(24 * time.Hour) @@ -333,7 +344,7 @@ func UsingRequestResponseWithObject(req *http.Request, } // calculate if a freshness directive is present: http://tools.ietf.org/html/rfc7234#section-4.2.1 -func hasFreshness(reqDir *RequestCacheDirectives, respDir *ResponseCacheDirectives, respHeaders http.Header, respExpires time.Time, privateCache bool) bool { +func hasFreshness(respDir *ResponseCacheDirectives, respHeaders http.Header, respExpires time.Time, privateCache bool) bool { if !privateCache && respDir.SMaxAge != -1 { return true } diff --git a/vendor/github.com/pquerna/cachecontrol/go.mod b/vendor/github.com/pquerna/cachecontrol/go.mod index 701376794ae..368205a59fc 100644 --- a/vendor/github.com/pquerna/cachecontrol/go.mod +++ b/vendor/github.com/pquerna/cachecontrol/go.mod @@ -1,5 +1,5 @@ module github.com/pquerna/cachecontrol -go 1.14 +go 1.16 require github.com/stretchr/testify v1.6.1 diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info.go deleted file mode 100644 index 288f0e85488..00000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/build_info.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build go1.12 - -package prometheus - -import "runtime/debug" - -// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+. -func readBuildInfo() (path, version, sum string) { - path, version, sum = "unknown", "unknown", "unknown" - if bi, ok := debug.ReadBuildInfo(); ok { - path = bi.Main.Path - version = bi.Main.Version - sum = bi.Main.Sum - } - return -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 957d93a2dbb..4bb816ab75a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -20,7 +20,7 @@ import ( "strings" "github.com/cespare/xxhash/v2" - //lint:ignore SA1019 Need to keep deprecated package for compatibility. + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" "github.com/prometheus/common/model" diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index 6f67d104649..db43ca5bab2 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -384,7 +384,12 @@ type memStatsMetrics []struct { // https://github.com/povilasv/prommod for an example of a collector for the // module dependencies. func NewBuildInfoCollector() Collector { - path, version, sum := readBuildInfo() + path, version, sum := "unknown", "unknown", "unknown" + if bi, ok := debug.ReadBuildInfo(); ok { + path = bi.Main.Path + version = bi.Main.Version + sum = bi.Main.Sum + } c := &selfCollector{MustNewConstMetric( NewDesc( "go_build_info", diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index f71e286be55..3346fa1c556 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -22,7 +22,7 @@ import ( "sync/atomic" "time" - //lint:ignore SA1019 Need to keep deprecated package for compatibility. + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index a2b80b1c19d..dc121910a52 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -17,7 +17,7 @@ import ( "strings" "time" - //lint:ignore SA1019 Need to keep deprecated package for compatibility. + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. 
"github.com/golang/protobuf/proto" "github.com/prometheus/common/model" @@ -58,7 +58,7 @@ type Metric interface { } // Opts bundles the options for creating most Metric types. Each metric -// implementation XXX has its own XXXOpts type, but in most cases, it is just be +// implementation XXX has its own XXXOpts type, but in most cases, it is just // an alias of this type (which might change when the requirement arises.) // // It is mandatory to set Name to a non-empty string. All other fields are diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go index 5070e72e28f..e7c0d05464f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -83,8 +83,7 @@ type readerFromDelegator struct{ *responseWriterDelegator } type pusherDelegator struct{ *responseWriterDelegator } func (d closeNotifierDelegator) CloseNotify() <-chan bool { - //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to - //remove support from client_golang yet. + //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users. return d.ResponseWriter.(http.CloseNotifier).CloseNotify() } func (d flusherDelegator) Flush() { @@ -348,8 +347,7 @@ func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) deleg } id := 0 - //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to - //remove support from client_golang yet. + //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users. if _, ok := w.(http.CloseNotifier); ok { id += closeNotifier } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 48f5ef9d72f..383a7f5941a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -26,7 +26,7 @@ import ( "unicode/utf8" "github.com/cespare/xxhash/v2" - //lint:ignore SA1019 Need to keep deprecated package for compatibility. + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" "github.com/prometheus/common/expfmt" diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index cf700714967..fb5ce22bf60 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -23,7 +23,7 @@ import ( "time" "github.com/beorn7/perks/quantile" - //lint:ignore SA1019 Need to keep deprecated package for compatibility. + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go index 8304de47733..c778711b8ab 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -19,7 +19,7 @@ import ( "time" "unicode/utf8" - //lint:ignore SA1019 Need to keep deprecated package for compatibility. 
+ //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 6ba49d85bda..4ababe6c981 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -167,8 +167,8 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { // calling the newMetric function provided during construction of the // MetricVec). // -// It is possible to call this method without using the returned Metry to only -// create the new Metric but leave it in its intitial state. +// It is possible to call this method without using the returned Metric to only +// create the new Metric but leave it in its initial state. // // Keeping the Metric for later use is possible (and should be considered if // performance is critical), but keep in mind that Reset, DeleteLabelValues and diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go index c1b12f0847e..74ee93280fe 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -17,7 +17,7 @@ import ( "fmt" "sort" - //lint:ignore SA1019 Need to keep deprecated package for compatibility. + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index c40e6403ca8..7f67b16e429 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -14,6 +14,8 @@ package model import ( + "encoding/json" + "errors" "fmt" "math" "regexp" @@ -201,13 +203,23 @@ func ParseDuration(durationStr string) (Duration, error) { // Parse the match at pos `pos` in the regex and use `mult` to turn that // into ms, then add that value to the total parsed duration. + var overflowErr error m := func(pos int, mult time.Duration) { if matches[pos] == "" { return } n, _ := strconv.Atoi(matches[pos]) + + // Check if the provided duration overflows time.Duration (> ~ 290years). + if n > int((1<<63-1)/mult/time.Millisecond) { + overflowErr = errors.New("duration out of range") + } d := time.Duration(n) * time.Millisecond dur += d * mult + + if dur < 0 { + overflowErr = errors.New("duration out of range") + } } m(2, 1000*60*60*24*365) // y @@ -218,7 +230,7 @@ func ParseDuration(durationStr string) (Duration, error) { m(12, 1000) // s m(14, 1) // ms - return Duration(dur), nil + return Duration(dur), overflowErr } func (d Duration) String() string { @@ -254,6 +266,37 @@ func (d Duration) String() string { return r } +// MarshalJSON implements the json.Marshaler interface. +func (d Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(d.String()) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (d *Duration) UnmarshalJSON(bytes []byte) error { + var s string + if err := json.Unmarshal(bytes, &s); err != nil { + return err + } + dur, err := ParseDuration(s) + if err != nil { + return err + } + *d = dur + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface. 
+func (d *Duration) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (d *Duration) UnmarshalText(text []byte) error { + var err error + *d, err = ParseDuration(string(text)) + return err +} + // MarshalYAML implements the yaml.Marshaler interface. func (d Duration) MarshalYAML() (interface{}, error) { return d.String(), nil diff --git a/vendor/github.com/russross/blackfriday/.travis.yml b/vendor/github.com/russross/blackfriday/.travis.yml index 2f3351d7ae5..a49fff15ac1 100644 --- a/vendor/github.com/russross/blackfriday/.travis.yml +++ b/vendor/github.com/russross/blackfriday/.travis.yml @@ -3,6 +3,7 @@ language: go go: - "1.9.x" - "1.10.x" + - "1.11.x" - tip matrix: fast_finish: true diff --git a/vendor/github.com/russross/blackfriday/LICENSE.txt b/vendor/github.com/russross/blackfriday/LICENSE.txt index 2885af3602d..7fbb253a8ec 100644 --- a/vendor/github.com/russross/blackfriday/LICENSE.txt +++ b/vendor/github.com/russross/blackfriday/LICENSE.txt @@ -1,29 +1,28 @@ Blackfriday is distributed under the Simplified BSD License: -> Copyright © 2011 Russ Ross -> All rights reserved. -> -> Redistribution and use in source and binary forms, with or without -> modification, are permitted provided that the following conditions -> are met: -> -> 1. Redistributions of source code must retain the above copyright -> notice, this list of conditions and the following disclaimer. -> -> 2. Redistributions in binary form must reproduce the above -> copyright notice, this list of conditions and the following -> disclaimer in the documentation and/or other materials provided with -> the distribution. -> -> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -> POSSIBILITY OF SUCH DAMAGE. +Copyright © 2011 Russ Ross +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided with + the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/russross/blackfriday/README.md b/vendor/github.com/russross/blackfriday/README.md index 3c62e137533..997ef5d4299 100644 --- a/vendor/github.com/russross/blackfriday/README.md +++ b/vendor/github.com/russross/blackfriday/README.md @@ -1,6 +1,6 @@ Blackfriday -[![Build Status][BuildSVG]][BuildURL] -[![Godoc][GodocV2SVG]][GodocV2URL] +[![Build Status][BuildV2SVG]][BuildV2URL] +[![PkgGoDev][PkgGoDevV2SVG]][PkgGoDevV2URL] =========== Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It @@ -18,12 +18,21 @@ It started as a translation from C of [Sundown][3]. Installation ------------ -Blackfriday is compatible with any modern Go release. With Go and git installed: +Blackfriday is compatible with modern Go releases in module mode. +With Go installed: - go get -u gopkg.in/russross/blackfriday.v2 + go get github.com/russross/blackfriday -will download, compile, and install the package into your `$GOPATH` directory -hierarchy. +will resolve and add the package to the current development module, +then build and install it. Alternatively, you can achieve the same +if you import it in a package: + + import "github.com/russross/blackfriday" + +and `go get` without parameters. + +Old versions of Go and legacy GOPATH mode might work, +but no effort is made to keep them working. Versions @@ -32,13 +41,9 @@ Versions Currently maintained and recommended version of Blackfriday is `v2`. It's being developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the documentation is available at -https://godoc.org/gopkg.in/russross/blackfriday.v2. +https://pkg.go.dev/github.com/russross/blackfriday/v2. -It is `go get`-able via [gopkg.in][6] at `gopkg.in/russross/blackfriday.v2`, -but we highly recommend using package management tool like [dep][7] or -[Glide][8] and make use of semantic versioning. With package management you -should import `github.com/russross/blackfriday` and specify that you're using -version 2.0.0. +It is `go get`-able in module mode at `github.com/russross/blackfriday/v2`. Version 2 offers a number of improvements over v1: @@ -60,22 +65,7 @@ Potential drawbacks: If you are still interested in the legacy `v1`, you can import it from `github.com/russross/blackfriday`. Documentation for the legacy v1 can be found -here: https://godoc.org/github.com/russross/blackfriday - -### Known issue with `dep` - -There is a known problem with using Blackfriday v1 _transitively_ and `dep`. -Currently `dep` prioritizes semver versions over anything else, and picks the -latest one, plus it does not apply a `[[constraint]]` specifier to transitively -pulled in packages. So if you're using something that uses Blackfriday v1, but -that something does not use `dep` yet, you will get Blackfriday v2 pulled in and -your first dependency will fail to build. 
- -There are couple of fixes for it, documented here: -https://github.com/golang/dep/blob/master/docs/FAQ.md#how-do-i-constrain-a-transitive-dependencys-version - -Meanwhile, `dep` team is working on a more general solution to the constraints -on transitive dependencies problem: https://github.com/golang/dep/issues/1124. +here: https://pkg.go.dev/github.com/russross/blackfriday. Usage @@ -86,12 +76,16 @@ Usage For basic usage, it is as simple as getting your input into a byte slice and calling: - output := blackfriday.MarkdownBasic(input) +```go +output := blackfriday.MarkdownBasic(input) +``` This renders it with no extensions enabled. To get a more useful feature set, use this instead: - output := blackfriday.MarkdownCommon(input) +```go +output := blackfriday.MarkdownCommon(input) +``` ### v2 @@ -121,7 +115,7 @@ Here's an example of simple usage of Blackfriday together with Bluemonday: ```go import ( "github.com/microcosm-cc/bluemonday" - "gopkg.in/russross/blackfriday.v2" + "github.com/russross/blackfriday" ) // ... @@ -154,7 +148,7 @@ markdown file using a standalone program. You can also browse the source directly on github if you are just looking for some example code: -* +* Note that if you have not already done so, installing `blackfriday-tool` will be sufficient to download and install @@ -171,12 +165,12 @@ anchors for headings when `EXTENSION_AUTO_HEADER_IDS` is enabled. The algorithm has a specification, so that other packages can create compatible anchor names and links to those anchors. -The specification is located at https://godoc.org/github.com/russross/blackfriday#hdr-Sanitized_Anchor_Names. +The specification is located at https://pkg.go.dev/github.com/russross/blackfriday#hdr-Sanitized_Anchor_Names. -[`SanitizedAnchorName`](https://godoc.org/github.com/russross/blackfriday#SanitizedAnchorName) exposes this functionality, and can be used to +[`SanitizedAnchorName`](https://pkg.go.dev/github.com/russross/blackfriday#SanitizedAnchorName) exposes this functionality, and can be used to create compatible links to the anchor names generated by blackfriday. This algorithm is also implemented in a small standalone package at -[`github.com/shurcooL/sanitized_anchor_name`](https://godoc.org/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients +[`github.com/shurcooL/sanitized_anchor_name`](https://pkg.go.dev/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients that want a small package and don't need full functionality of blackfriday. @@ -246,7 +240,7 @@ implements the following extensions: and supply a language (to make syntax highlighting simple). Just mark it like this: - ``` go + ```go func getTrue() bool { return true } @@ -258,7 +252,7 @@ implements the following extensions: To preserve classes of fenced code blocks while using the bluemonday HTML sanitizer, use the following policy: - ``` go + ```go p := bluemonday.UGCPolicy() p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code") html := p.SanitizeBytes(unsafe) @@ -269,7 +263,7 @@ implements the following extensions: Cat : Fluffy animal everyone likes - + Internet : Vector of transmission for pictures of cats @@ -280,7 +274,7 @@ implements the following extensions: end of the document. A footnote looks like this: This is a footnote.[^1] - + [^1]: the footnote text. * **Autolinking**. Blackfriday can find URLs that have not been @@ -317,7 +311,7 @@ Other renderers Blackfriday is structured to allow alternative rendering engines. 
Here are a few of note: -* [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown): +* [github_flavored_markdown](https://pkg.go.dev/github.com/shurcooL/github_flavored_markdown): provides a GitHub Flavored Markdown renderer with fenced code block highlighting, clickable heading anchor links. @@ -328,7 +322,7 @@ are a few of note: * [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt, but for markdown. -* [LaTeX output](https://bitbucket.org/ambrevar/blackfriday-latex): +* [LaTeX output](https://gitlab.com/ambrevar/blackfriday-latex): renders output as LaTeX. * [bfchroma](https://github.com/Depado/bfchroma/): provides convenience @@ -337,6 +331,10 @@ are a few of note: provides a drop-in renderer ready to use with Blackfriday, as well as options and means for further customization. +* [Blackfriday-Confluence](https://github.com/kentaro-m/blackfriday-confluence): provides a [Confluence Wiki Markup](https://confluence.atlassian.com/doc/confluence-wiki-markup-251003035.html) renderer. + +* [Blackfriday-Slack](https://github.com/karriereat/blackfriday-slack): converts markdown to slack message style + TODO ---- @@ -357,13 +355,10 @@ License [1]: https://daringfireball.net/projects/markdown/ "Markdown" [2]: https://golang.org/ "Go Language" [3]: https://github.com/vmg/sundown "Sundown" - [4]: https://godoc.org/gopkg.in/russross/blackfriday.v2#Parse "Parse func" + [4]: https://pkg.go.dev/github.com/russross/blackfriday/v2#Parse "Parse func" [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday" - [6]: https://labix.org/gopkg.in "gopkg.in" - [7]: https://github.com/golang/dep/ "dep" - [8]: https://github.com/Masterminds/glide "Glide" - - [BuildSVG]: https://travis-ci.org/russross/blackfriday.svg?branch=master - [BuildURL]: https://travis-ci.org/russross/blackfriday - [GodocV2SVG]: https://godoc.org/gopkg.in/russross/blackfriday.v2?status.svg - [GodocV2URL]: https://godoc.org/gopkg.in/russross/blackfriday.v2 + + [BuildV2SVG]: https://travis-ci.org/russross/blackfriday.svg?branch=v2 + [BuildV2URL]: https://travis-ci.org/russross/blackfriday + [PkgGoDevV2SVG]: https://pkg.go.dev/badge/github.com/russross/blackfriday/v2 + [PkgGoDevV2URL]: https://pkg.go.dev/github.com/russross/blackfriday/v2 diff --git a/vendor/github.com/russross/blackfriday/block.go b/vendor/github.com/russross/blackfriday/block.go index 45c21a6c267..563cb290384 100644 --- a/vendor/github.com/russross/blackfriday/block.go +++ b/vendor/github.com/russross/blackfriday/block.go @@ -649,14 +649,17 @@ func isFenceLine(data []byte, info *string, oldmarker string, newlineOptional bo } i = skipChar(data, i, ' ') - if i >= len(data) || data[i] != '\n' { - if newlineOptional && i == len(data) { + if i >= len(data) { + if newlineOptional { return i, marker } return 0, "" } + if data[i] == '\n' { + i++ // Take newline into account + } - return i + 1, marker // Take newline into account. 
+ return i, marker } // fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, @@ -1133,6 +1136,15 @@ func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int { i++ } + // process the following lines + containsBlankLine := false + sublist := 0 + codeBlockMarker := "" + if p.flags&EXTENSION_FENCED_CODE != 0 && i > line { + // determine if codeblock starts on the first line + _, codeBlockMarker = isFenceLine(data[line:i], nil, "", false) + } + // get working buffer var raw bytes.Buffer @@ -1140,11 +1152,6 @@ func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int { raw.Write(data[line:i]) line = i - // process the following lines - containsBlankLine := false - sublist := 0 - codeBlockMarker := "" - gatherlines: for line < len(data) { i++ @@ -1153,7 +1160,6 @@ gatherlines: for data[i-1] != '\n' { i++ } - // if it is an empty line, guess that it is part of this item // and move on to the next line if p.isEmpty(data[line:i]) > 0 { diff --git a/vendor/github.com/russross/blackfriday/go.mod b/vendor/github.com/russross/blackfriday/go.mod index b05561a066d..d0f058ae90e 100644 --- a/vendor/github.com/russross/blackfriday/go.mod +++ b/vendor/github.com/russross/blackfriday/go.mod @@ -1 +1,3 @@ module github.com/russross/blackfriday + +go 1.13 diff --git a/vendor/github.com/russross/blackfriday/html.go b/vendor/github.com/russross/blackfriday/html.go index e0a6c69c96d..fa044ca2152 100644 --- a/vendor/github.com/russross/blackfriday/html.go +++ b/vendor/github.com/russross/blackfriday/html.go @@ -32,6 +32,7 @@ const ( HTML_SAFELINK // only link to trusted protocols HTML_NOFOLLOW_LINKS // only link with rel="nofollow" HTML_NOREFERRER_LINKS // only link with rel="noreferrer" + HTML_NOOPENER_LINKS // only link with rel="noopener" HTML_HREF_TARGET_BLANK // add a blank target HTML_TOC // generate a table of contents HTML_OMIT_CONTENTS // skip the main contents (for a standalone table of contents) @@ -445,6 +446,9 @@ func (options *Html) AutoLink(out *bytes.Buffer, link []byte, kind int) { if options.flags&HTML_NOREFERRER_LINKS != 0 && !isRelativeLink(link) { relAttrs = append(relAttrs, "noreferrer") } + if options.flags&HTML_NOOPENER_LINKS != 0 && !isRelativeLink(link) { + relAttrs = append(relAttrs, "noopener") + } if len(relAttrs) > 0 { out.WriteString(fmt.Sprintf("\" rel=\"%s", strings.Join(relAttrs, " "))) } @@ -559,6 +563,9 @@ func (options *Html) Link(out *bytes.Buffer, link []byte, title []byte, content if options.flags&HTML_NOREFERRER_LINKS != 0 && !isRelativeLink(link) { relAttrs = append(relAttrs, "noreferrer") } + if options.flags&HTML_NOOPENER_LINKS != 0 && !isRelativeLink(link) { + relAttrs = append(relAttrs, "noopener") + } if len(relAttrs) > 0 { out.WriteString(fmt.Sprintf("\" rel=\"%s", strings.Join(relAttrs, " "))) } diff --git a/vendor/github.com/russross/blackfriday/inline.go b/vendor/github.com/russross/blackfriday/inline.go index 4483b8f19fd..27056ebcad3 100644 --- a/vendor/github.com/russross/blackfriday/inline.go +++ b/vendor/github.com/russross/blackfriday/inline.go @@ -252,7 +252,7 @@ func link(p *parser, out *bytes.Buffer, data []byte, offset int) int { case data[i] == '\n': textHasNl = true - case data[i-1] == '\\': + case isBackslashEscaped(data, i): continue case data[i] == '[': diff --git a/vendor/github.com/russross/blackfriday/markdown.go b/vendor/github.com/russross/blackfriday/markdown.go index 41595d62d08..a0349cafecb 100644 --- a/vendor/github.com/russross/blackfriday/markdown.go +++ 
b/vendor/github.com/russross/blackfriday/markdown.go @@ -132,6 +132,7 @@ var blockTags = map[string]struct{}{ "article": {}, "aside": {}, "canvas": {}, + "details": {}, "figcaption": {}, "figure": {}, "footer": {}, @@ -142,6 +143,7 @@ var blockTags = map[string]struct{}{ "output": {}, "progress": {}, "section": {}, + "summary": {}, "video": {}, } diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml index e6ee8b3ab37..c1dbd5a3a3e 100644 --- a/vendor/github.com/sirupsen/logrus/.travis.yml +++ b/vendor/github.com/sirupsen/logrus/.travis.yml @@ -9,6 +9,7 @@ os: linux install: - ./travis/install.sh script: - - go run mage.go -v crossBuild - - go run mage.go lint - - go run mage.go test + - cd ci + - go run mage.go -v -w ../ crossBuild + - go run mage.go -v -w ../ lint + - go run mage.go -v -w ../ test diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md index f68fb86bda3..7567f612898 100644 --- a/vendor/github.com/sirupsen/logrus/CHANGELOG.md +++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md @@ -1,4 +1,20 @@ +# 1.8.1 +Code quality: + * move magefile in its own subdir/submodule to remove magefile dependency on logrus consumer + * improve timestamp format documentation + +Fixes: + * fix race condition on logger hooks + + +# 1.8.0 + +Correct versioning number replacing v1.7.1. + # 1.7.1 + +Beware this release has introduced a new public API and its semver is therefore incorrect. + Code quality: * use go 1.15 in travis * use magefile as task runner diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go index e02e057e1ce..07a1e5fa724 100644 --- a/vendor/github.com/sirupsen/logrus/entry.go +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -88,10 +88,6 @@ func (entry *Entry) Dup() *Entry { // Returns the bytes representation of this entry from the formatter. 
func (entry *Entry) Bytes() ([]byte, error) { - return entry.bytes_nolock() -} - -func (entry *Entry) bytes_nolock() ([]byte, error) { return entry.Logger.Formatter.Format(entry) } @@ -222,8 +218,6 @@ func (entry Entry) HasCaller() (has bool) { entry.Caller != nil } -// This function is not declared with a pointer value because otherwise -// race conditions will occur when using multiple goroutines func (entry *Entry) log(level Level, msg string) { var buffer *bytes.Buffer @@ -267,7 +261,15 @@ func (entry *Entry) log(level Level, msg string) { } func (entry *Entry) fireHooks() { - err := entry.Logger.Hooks.Fire(entry.Level, entry) + var tmpHooks LevelHooks + entry.Logger.mu.Lock() + tmpHooks = make(LevelHooks, len(entry.Logger.Hooks)) + for k, v := range entry.Logger.Hooks { + tmpHooks[k] = v + } + entry.Logger.mu.Unlock() + + err := tmpHooks.Fire(entry.Level, entry) if err != nil { fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) } @@ -279,13 +281,11 @@ func (entry *Entry) write() { fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) return } - func() { - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - if _, err := entry.Logger.Out.Write(serialized); err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) - } - }() + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + if _, err := entry.Logger.Out.Write(serialized); err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } } func (entry *Entry) Log(level Level, args ...interface{}) { diff --git a/vendor/github.com/sirupsen/logrus/go.mod b/vendor/github.com/sirupsen/logrus/go.mod index 37004ff347d..b3919d5eabf 100644 --- a/vendor/github.com/sirupsen/logrus/go.mod +++ b/vendor/github.com/sirupsen/logrus/go.mod @@ -2,7 +2,6 @@ module github.com/sirupsen/logrus require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/magefile/mage v1.10.0 github.com/pmezard/go-difflib v1.0.0 // indirect github.com/stretchr/testify v1.2.2 golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 diff --git a/vendor/github.com/sirupsen/logrus/go.sum b/vendor/github.com/sirupsen/logrus/go.sum index bce26a18807..694c18b8454 100644 --- a/vendor/github.com/sirupsen/logrus/go.sum +++ b/vendor/github.com/sirupsen/logrus/go.sum @@ -1,12 +1,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/magefile/mage v1.10.0 h1:3HiXzCUY12kh9bIuyXShaVe529fJfyqoVM42o/uom2g= -github.com/magefile/mage v1.10.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go index afaf0fc8a2f..c96dc5636bf 100644 --- 
a/vendor/github.com/sirupsen/logrus/json_formatter.go +++ b/vendor/github.com/sirupsen/logrus/json_formatter.go @@ -23,6 +23,9 @@ func (f FieldMap) resolve(key fieldKey) string { // JSONFormatter formats logs into parsable json type JSONFormatter struct { // TimestampFormat sets the format used for marshaling timestamps. + // The format to use is the same than for time.Format or time.Parse from the standard + // library. + // The standard Library already provides a set of predefined format. TimestampFormat string // DisableTimestamp allows disabling automatic timestamps in output diff --git a/vendor/github.com/sirupsen/logrus/magefile.go b/vendor/github.com/sirupsen/logrus/magefile.go deleted file mode 100644 index 9aa60393908..00000000000 --- a/vendor/github.com/sirupsen/logrus/magefile.go +++ /dev/null @@ -1,77 +0,0 @@ -// +build mage - -package main - -import ( - "encoding/json" - "fmt" - "os" - "path" - - "github.com/magefile/mage/mg" - "github.com/magefile/mage/sh" -) - -// getBuildMatrix returns the build matrix from the current version of the go compiler -func getBuildMatrix() (map[string][]string, error) { - jsonData, err := sh.Output("go", "tool", "dist", "list", "-json") - if err != nil { - return nil, err - } - var data []struct { - Goos string - Goarch string - } - if err := json.Unmarshal([]byte(jsonData), &data); err != nil { - return nil, err - } - - matrix := map[string][]string{} - for _, v := range data { - if val, ok := matrix[v.Goos]; ok { - matrix[v.Goos] = append(val, v.Goarch) - } else { - matrix[v.Goos] = []string{v.Goarch} - } - } - - return matrix, nil -} - -func CrossBuild() error { - matrix, err := getBuildMatrix() - if err != nil { - return err - } - - for os, arches := range matrix { - for _, arch := range arches { - env := map[string]string{ - "GOOS": os, - "GOARCH": arch, - } - if mg.Verbose() { - fmt.Printf("Building for GOOS=%s GOARCH=%s\n", os, arch) - } - if err := sh.RunWith(env, "go", "build", "./..."); err != nil { - return err - } - } - } - return nil -} - -func Lint() error { - gopath := os.Getenv("GOPATH") - if gopath == "" { - return fmt.Errorf("cannot retrieve GOPATH") - } - - return sh.Run(path.Join(gopath, "bin", "golangci-lint"), "run", "./...") -} - -// Run the test suite -func Test() error { - return sh.RunWith(map[string]string{"GORACE": "halt_on_error=1"}, - "go", "test", "-race", "-v", "./...") -} diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go index 8fc698ad601..be2c6efe5ed 100644 --- a/vendor/github.com/sirupsen/logrus/text_formatter.go +++ b/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -53,7 +53,10 @@ type TextFormatter struct { // the time passed since beginning of execution. FullTimestamp bool - // TimestampFormat to use for display when a full timestamp is printed + // TimestampFormat to use for display when a full timestamp is printed. + // The format to use is the same than for time.Format or time.Parse from the standard + // library. + // The standard Library already provides a set of predefined format. TimestampFormat string // The fields are sorted by default for a consistent output. 
For applications diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml new file mode 100644 index 00000000000..0d6e61793a7 --- /dev/null +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -0,0 +1,48 @@ +run: + deadline: 5m + +linters: + disable-all: true + enable: + #- bodyclose + - deadcode + #- depguard + #- dogsled + #- dupl + - errcheck + #- exhaustive + #- funlen + - gas + #- gochecknoinits + - goconst + #- gocritic + #- gocyclo + #- gofmt + - goimports + - golint + #- gomnd + #- goprintffuncname + #- gosec + #- gosimple + - govet + - ineffassign + - interfacer + #- lll + - maligned + - megacheck + #- misspell + #- nakedret + #- noctx + #- nolintlint + #- rowserrcheck + #- scopelint + #- staticcheck + - structcheck + #- stylecheck + #- typecheck + - unconvert + #- unparam + #- unused + - varcheck + #- whitespace + fast: false diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml index a9bd4e54785..e0a3b50043b 100644 --- a/vendor/github.com/spf13/cobra/.travis.yml +++ b/vendor/github.com/spf13/cobra/.travis.yml @@ -1,7 +1,6 @@ language: go stages: - - diff - test - build @@ -10,20 +9,20 @@ go: - 1.13.x - tip +env: GO111MODULE=on + before_install: - go get -u github.com/kyoh86/richgo - go get -u github.com/mitchellh/gox + - curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin latest matrix: allow_failures: - go: tip include: - - stage: diff - go: 1.13.x - script: make fmt - stage: build go: 1.13.x script: make cobra_generator -script: +script: - make test diff --git a/vendor/github.com/spf13/cobra/CHANGELOG.md b/vendor/github.com/spf13/cobra/CHANGELOG.md index 742d6d6e24a..8a23b4f8513 100644 --- a/vendor/github.com/spf13/cobra/CHANGELOG.md +++ b/vendor/github.com/spf13/cobra/CHANGELOG.md @@ -1,11 +1,40 @@ # Cobra Changelog -## Pending -* Fix man page doc generation - no auto generated tag when `cmd.DisableAutoGenTag = true` @jpmcb +## v1.1.3 + +* **Fix:** release-branch.cobra1.1 only: Revert "Deprecate Go < 1.14" to maintain backward compatibility + +## v1.1.2 + +### Notable Changes + +* Bump license year to 2021 in golden files (#1309) @Bowbaq +* Enhance PowerShell completion with custom comp (#1208) @Luap99 +* Update gopkg.in/yaml.v2 to v2.4.0: The previous breaking change in yaml.v2 v2.3.0 has been reverted, see go-yaml/yaml#670 +* Documentation readability improvements (#1228 etc.) @zaataylor etc. +* Use golangci-lint: Repair warnings and errors resulting from linting (#1044) @umarcor + +## v1.1.1 + +* **Fix:** yaml.v2 2.3.0 contained a unintended breaking change. This release reverts to yaml.v2 v2.2.8 which has recent critical CVE fixes, but does not have the breaking changes. See https://github.com/spf13/cobra/pull/1259 for context. +* **Fix:** correct internal formatting for go-md2man v2 (which caused man page generation to be broken). See https://github.com/spf13/cobra/issues/1049 for context. + +## v1.1.0 + +### Notable Changes + +* Extend Go completions and revamp zsh comp (#1070) +* Fix man page doc generation - no auto generated tag when `cmd.DisableAutoGenTag = true` (#1104) @jpmcb +* Add completion for help command (#1136) +* Complete subcommands when TraverseChildren is set (#1171) +* Fix stderr printing functions (#894) +* fix: fish output redirection (#1247) ## v1.0.0 + Announcing v1.0.0 of Cobra. 
🎉 -**Notable Changes** + +### Notable Changes * Fish completion (including support for Go custom completion) @marckhouzam * API (urgent): Rename BashCompDirectives to ShellCompDirectives @marckhouzam * Remove/replace SetOutput on Command - deprecated @jpmcb diff --git a/vendor/github.com/spf13/cobra/CONDUCT.md b/vendor/github.com/spf13/cobra/CONDUCT.md new file mode 100644 index 00000000000..9d16f88fd12 --- /dev/null +++ b/vendor/github.com/spf13/cobra/CONDUCT.md @@ -0,0 +1,37 @@ +## Cobra User Contract + +### Versioning +Cobra will follow a steady release cadence. Non breaking changes will be released as minor versions quarterly. Patch bug releases are at the discretion of the maintainers. Users can expect security patch fixes to be released within relatively short order of a CVE becoming known. For more information on security patch fixes see the CVE section below. Releases will follow [Semantic Versioning](https://semver.org/). Users tracking the Master branch should expect unpredictable breaking changes as the project continues to move forward. For stability, it is highly recommended to use a release. + +### Backward Compatibility +We will maintain two major releases in a moving window. The N-1 release will only receive bug fixes and security updates and will be dropped once N+1 is released. + +### Deprecation +Deprecation of Go versions or dependent packages will only occur in major releases. To reduce the change of this taking users by surprise, any large deprecation will be preceded by an announcement in the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) and an Issue on Github. + +### CVE +Maintainers will make every effort to release security patches in the case of a medium to high severity CVE directly impacting the library. The speed in which these patches reach a release is up to the discretion of the maintainers. A low severity CVE may be a lower priority than a high severity one. + +### Communication +Cobra maintainers will use GitHub issues and the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) as the primary means of communication with the community. This is to foster open communication with all users and contributors. + +### Breaking Changes +Breaking changes are generally allowed in the master branch, as this is the branch used to develop the next release of Cobra. + +There may be times, however, when master is closed for breaking changes. This is likely to happen as we near the release of a new version. + +Breaking changes are not allowed in release branches, as these represent minor versions that have already been released. These version have consumers who expect the APIs, behaviors, etc, to remain stable during the lifetime of the patch stream for the minor release. + +Examples of breaking changes include: +- Removing or renaming exported constant, variable, type, or function. +- Updating the version of critical libraries such as `spf13/pflag`, `spf13/viper` etc... + - Some version updates may be acceptable for picking up bug fixes, but maintainers must exercise caution when reviewing. + +There may, at times, need to be exceptions where breaking changes are allowed in release branches. These are at the discretion of the project's maintainers, and must be carefully considered before merging. + +### CI Testing +Maintainers will ensure the Cobra test suite utilizes the current supported versions of Golang. + +### Disclaimer +Changes to this document and the contents therein are at the discretion of the maintainers. 
+None of the contents of this document are legally binding in any way to the maintainers or the users. diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile index e9740d1e175..472c73bf16f 100644 --- a/vendor/github.com/spf13/cobra/Makefile +++ b/vendor/github.com/spf13/cobra/Makefile @@ -1,21 +1,29 @@ BIN="./bin" SRC=$(shell find . -name "*.go") +ifeq (, $(shell which golangci-lint)) +$(warning "could not find golangci-lint in $(PATH), run: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh") +endif + ifeq (, $(shell which richgo)) $(warning "could not find richgo in $(PATH), run: go get github.com/kyoh86/richgo") endif -.PHONY: fmt vet test cobra_generator install_deps clean +.PHONY: fmt lint test cobra_generator install_deps clean default: all -all: fmt vet test cobra_generator +all: fmt test cobra_generator fmt: $(info ******************** checking formatting ********************) @test -z $(shell gofmt -l $(SRC)) || (gofmt -d $(SRC); exit 1) -test: install_deps vet +lint: + $(info ******************** running lint tools ********************) + golangci-lint run -v + +test: install_deps lint $(info ******************** running tests ********************) richgo test -v ./... @@ -28,9 +36,5 @@ install_deps: $(info ******************** downloading dependencies ********************) go get -v ./... -vet: - $(info ******************** vetting ********************) - go vet ./... - clean: rm -rf $(BIN) diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index 3cf1b25d8e7..a1b13ddda6c 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -6,6 +6,7 @@ Cobra is used in many Go projects such as [Kubernetes](http://kubernetes.io/), [Hugo](https://gohugo.io), and [Github CLI](https://github.com/cli/cli) to name a few. [This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra. +[![](https://img.shields.io/github/workflow/status/spf13/cobra/Test?longCache=tru&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest) [![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra) [![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra) @@ -62,8 +63,8 @@ Cobra is built on a structure of commands, arguments & flags. **Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions. -The best applications will read like sentences when used. Users will know how -to use the application because they will natively understand how to use it. +The best applications read like sentences when used, and as a result, users +intuitively know how to interact with them. The pattern to follow is `APPNAME VERB NOUN --ADJECTIVE.` @@ -234,11 +235,6 @@ func init() { rootCmd.AddCommand(initCmd) } -func er(msg interface{}) { - fmt.Println("Error:", msg) - os.Exit(1) -} - func initConfig() { if cfgFile != "" { // Use config file from the flag. @@ -246,9 +242,7 @@ func initConfig() { } else { // Find home directory. home, err := homedir.Dir() - if err != nil { - er(err) - } + cobra.CheckErr(err) // Search config in home directory with name ".cobra" (without extension). 
viper.AddConfigPath(home) @@ -268,7 +262,7 @@ func initConfig() { With the root command you need to have your main function execute it. Execute should be run on the root for clarity, though it can be called on any command. -In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra. +In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra. ```go package main @@ -363,7 +357,7 @@ There are two different approaches to assign a flag. ### Persistent Flags -A flag can be 'persistent' meaning that this flag will be available to the +A flag can be 'persistent', meaning that this flag will be available to the command it's assigned to as well as every command under that command. For global flags, assign a flag as a persistent flag on the root. @@ -373,7 +367,7 @@ rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose out ### Local Flags -A flag can also be assigned locally which will only apply to that specific command. +A flag can also be assigned locally, which will only apply to that specific command. ```go localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") @@ -381,8 +375,8 @@ localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to rea ### Local Flag on Parent Commands -By default Cobra only parses local flags on the target command, any local flags on -parent commands are ignored. By enabling `Command.TraverseChildren` Cobra will +By default, Cobra only parses local flags on the target command, and any local flags on +parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will parse local flags on each command before executing the target command. ```go @@ -404,8 +398,8 @@ func init() { } ``` -In this example the persistent flag `author` is bound with `viper`. -**Note**, that the variable `author` will not be set to the value from config, +In this example, the persistent flag `author` is bound with `viper`. +**Note**: the variable `author` will not be set to the value from config, when the `--author` flag is not provided by user. More in [viper documentation](https://github.com/spf13/viper#working-with-flags). @@ -465,7 +459,7 @@ var cmd = &cobra.Command{ In the example below, we have defined three commands. Two are at the top level and one (cmdTimes) is a child of one of the top commands. In this case the root -is not executable meaning that a subcommand is required. This is accomplished +is not executable, meaning that a subcommand is required. This is accomplished by not providing a 'Run' for the 'rootCmd'. We have only defined one flag for a single command. @@ -759,7 +753,7 @@ Cobra can generate documentation based on subcommands, flags, etc. Read more abo ## Generating shell completions -Cobra can generate a shell-completion file for the following shells: Bash, Zsh, Fish, Powershell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md). +Cobra can generate a shell-completion file for the following shells: bash, zsh, fish, PowerShell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md). 
# License diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go index 846636d75b1..7106147937e 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -19,9 +19,9 @@ const ( BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir" ) -func writePreamble(buf *bytes.Buffer, name string) { - buf.WriteString(fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name)) - buf.WriteString(fmt.Sprintf(` +func writePreamble(buf io.StringWriter, name string) { + WriteStringAndCheck(buf, fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(` __%[1]s_debug() { if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then @@ -380,10 +380,10 @@ __%[1]s_handle_word() ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs)) } -func writePostscript(buf *bytes.Buffer, name string) { +func writePostscript(buf io.StringWriter, name string) { name = strings.Replace(name, ":", "__", -1) - buf.WriteString(fmt.Sprintf("__start_%s()\n", name)) - buf.WriteString(fmt.Sprintf(`{ + WriteStringAndCheck(buf, fmt.Sprintf("__start_%s()\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(`{ local cur prev words cword declare -A flaghash 2>/dev/null || : declare -A aliashash 2>/dev/null || : @@ -410,33 +410,33 @@ func writePostscript(buf *bytes.Buffer, name string) { } `, name)) - buf.WriteString(fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then + WriteStringAndCheck(buf, fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then complete -o default -F __start_%s %s else complete -o default -o nospace -F __start_%s %s fi `, name, name, name, name)) - buf.WriteString("# ex: ts=4 sw=4 et filetype=sh\n") + WriteStringAndCheck(buf, "# ex: ts=4 sw=4 et filetype=sh\n") } -func writeCommands(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(" commands=()\n") +func writeCommands(buf io.StringWriter, cmd *Command) { + WriteStringAndCheck(buf, " commands=()\n") for _, c := range cmd.Commands() { if !c.IsAvailableCommand() && c != cmd.helpCommand { continue } - buf.WriteString(fmt.Sprintf(" commands+=(%q)\n", c.Name())) + WriteStringAndCheck(buf, fmt.Sprintf(" commands+=(%q)\n", c.Name())) writeCmdAliases(buf, c) } - buf.WriteString("\n") + WriteStringAndCheck(buf, "\n") } -func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]string, cmd *Command) { +func writeFlagHandler(buf io.StringWriter, name string, annotations map[string][]string, cmd *Command) { for key, value := range annotations { switch key { case BashCompFilenameExt: - buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) var ext string if len(value) > 0 { @@ -444,17 +444,18 @@ func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]s } else { ext = "_filedir" } - buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext)) + WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext)) case BashCompCustom: - buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + if len(value) > 0 { handlers := strings.Join(value, "; ") - buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", handlers)) + WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", handlers)) } else { - buf.WriteString(" flags_completion+=(:)\n") 
+ WriteStringAndCheck(buf, " flags_completion+=(:)\n") } case BashCompSubdirsInDir: - buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) var ext string if len(value) == 1 { @@ -462,46 +463,48 @@ func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]s } else { ext = "_filedir -d" } - buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext)) + WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext)) } } } -func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) { +const cbn = "\")\n" + +func writeShortFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) { name := flag.Shorthand format := " " if len(flag.NoOptDefVal) == 0 { format += "two_word_" } - format += "flags+=(\"-%s\")\n" - buf.WriteString(fmt.Sprintf(format, name)) + format += "flags+=(\"-%s" + cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, name)) writeFlagHandler(buf, "-"+name, flag.Annotations, cmd) } -func writeFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) { +func writeFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) { name := flag.Name format := " flags+=(\"--%s" if len(flag.NoOptDefVal) == 0 { format += "=" } - format += "\")\n" - buf.WriteString(fmt.Sprintf(format, name)) + format += cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, name)) if len(flag.NoOptDefVal) == 0 { - format = " two_word_flags+=(\"--%s\")\n" - buf.WriteString(fmt.Sprintf(format, name)) + format = " two_word_flags+=(\"--%s" + cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, name)) } writeFlagHandler(buf, "--"+name, flag.Annotations, cmd) } -func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) { +func writeLocalNonPersistentFlag(buf io.StringWriter, flag *pflag.Flag) { name := flag.Name - format := " local_nonpersistent_flags+=(\"--%[1]s\")\n" + format := " local_nonpersistent_flags+=(\"--%[1]s" + cbn if len(flag.NoOptDefVal) == 0 { - format += " local_nonpersistent_flags+=(\"--%[1]s=\")\n" + format += " local_nonpersistent_flags+=(\"--%[1]s=" + cbn } - buf.WriteString(fmt.Sprintf(format, name)) + WriteStringAndCheck(buf, fmt.Sprintf(format, name)) if len(flag.Shorthand) > 0 { - buf.WriteString(fmt.Sprintf(" local_nonpersistent_flags+=(\"-%s\")\n", flag.Shorthand)) + WriteStringAndCheck(buf, fmt.Sprintf(" local_nonpersistent_flags+=(\"-%s\")\n", flag.Shorthand)) } } @@ -519,9 +522,9 @@ func prepareCustomAnnotationsForFlags(cmd *Command) { } } -func writeFlags(buf *bytes.Buffer, cmd *Command) { +func writeFlags(buf io.StringWriter, cmd *Command) { prepareCustomAnnotationsForFlags(cmd) - buf.WriteString(` flags=() + WriteStringAndCheck(buf, ` flags=() two_word_flags=() local_nonpersistent_flags=() flags_with_completion=() @@ -553,11 +556,11 @@ func writeFlags(buf *bytes.Buffer, cmd *Command) { } }) - buf.WriteString("\n") + WriteStringAndCheck(buf, "\n") } -func writeRequiredFlag(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(" must_have_one_flag=()\n") +func writeRequiredFlag(buf io.StringWriter, cmd *Command) { + WriteStringAndCheck(buf, " must_have_one_flag=()\n") flags := cmd.NonInheritedFlags() flags.VisitAll(func(flag *pflag.Flag) { if nonCompletableFlag(flag) { @@ -570,55 +573,55 @@ func writeRequiredFlag(buf *bytes.Buffer, cmd *Command) { if flag.Value.Type() != "bool" { format += "=" } - format += "\")\n" - buf.WriteString(fmt.Sprintf(format, flag.Name)) + format += cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) if 
len(flag.Shorthand) > 0 { - buf.WriteString(fmt.Sprintf(" must_have_one_flag+=(\"-%s\")\n", flag.Shorthand)) + WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) } } } }) } -func writeRequiredNouns(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(" must_have_one_noun=()\n") - sort.Sort(sort.StringSlice(cmd.ValidArgs)) +func writeRequiredNouns(buf io.StringWriter, cmd *Command) { + WriteStringAndCheck(buf, " must_have_one_noun=()\n") + sort.Strings(cmd.ValidArgs) for _, value := range cmd.ValidArgs { // Remove any description that may be included following a tab character. // Descriptions are not supported by bash completion. value = strings.Split(value, "\t")[0] - buf.WriteString(fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) + WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) } if cmd.ValidArgsFunction != nil { - buf.WriteString(" has_completion_function=1\n") + WriteStringAndCheck(buf, " has_completion_function=1\n") } } -func writeCmdAliases(buf *bytes.Buffer, cmd *Command) { +func writeCmdAliases(buf io.StringWriter, cmd *Command) { if len(cmd.Aliases) == 0 { return } - sort.Sort(sort.StringSlice(cmd.Aliases)) + sort.Strings(cmd.Aliases) - buf.WriteString(fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n")) + WriteStringAndCheck(buf, fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n")) for _, value := range cmd.Aliases { - buf.WriteString(fmt.Sprintf(" command_aliases+=(%q)\n", value)) - buf.WriteString(fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name())) + WriteStringAndCheck(buf, fmt.Sprintf(" command_aliases+=(%q)\n", value)) + WriteStringAndCheck(buf, fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name())) } - buf.WriteString(` fi`) - buf.WriteString("\n") + WriteStringAndCheck(buf, ` fi`) + WriteStringAndCheck(buf, "\n") } -func writeArgAliases(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(" noun_aliases=()\n") - sort.Sort(sort.StringSlice(cmd.ArgAliases)) +func writeArgAliases(buf io.StringWriter, cmd *Command) { + WriteStringAndCheck(buf, " noun_aliases=()\n") + sort.Strings(cmd.ArgAliases) for _, value := range cmd.ArgAliases { - buf.WriteString(fmt.Sprintf(" noun_aliases+=(%q)\n", value)) + WriteStringAndCheck(buf, fmt.Sprintf(" noun_aliases+=(%q)\n", value)) } } -func gen(buf *bytes.Buffer, cmd *Command) { +func gen(buf io.StringWriter, cmd *Command) { for _, c := range cmd.Commands() { if !c.IsAvailableCommand() && c != cmd.helpCommand { continue @@ -630,22 +633,22 @@ func gen(buf *bytes.Buffer, cmd *Command) { commandName = strings.Replace(commandName, ":", "__", -1) if cmd.Root() == cmd { - buf.WriteString(fmt.Sprintf("_%s_root_command()\n{\n", commandName)) + WriteStringAndCheck(buf, fmt.Sprintf("_%s_root_command()\n{\n", commandName)) } else { - buf.WriteString(fmt.Sprintf("_%s()\n{\n", commandName)) + WriteStringAndCheck(buf, fmt.Sprintf("_%s()\n{\n", commandName)) } - buf.WriteString(fmt.Sprintf(" last_command=%q\n", commandName)) - buf.WriteString("\n") - buf.WriteString(" command_aliases=()\n") - buf.WriteString("\n") + WriteStringAndCheck(buf, fmt.Sprintf(" last_command=%q\n", commandName)) + WriteStringAndCheck(buf, "\n") + WriteStringAndCheck(buf, " command_aliases=()\n") + WriteStringAndCheck(buf, "\n") writeCommands(buf, cmd) writeFlags(buf, cmd) writeRequiredFlag(buf, cmd) writeRequiredNouns(buf, cmd) writeArgAliases(buf, cmd) - buf.WriteString("}\n\n") + WriteStringAndCheck(buf, "}\n\n") } // GenBashCompletion 
generates bash completion file and writes to the passed writer. diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md index a82d5bb8b42..130f99b9230 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.md +++ b/vendor/github.com/spf13/cobra/bash_completions.md @@ -4,7 +4,7 @@ Please refer to [Shell Completions](shell_completions.md) for details. ## Bash legacy dynamic completions -For backwards-compatibility, Cobra still supports its legacy dynamic completion solution (described below). Unlike the `ValidArgsFunction` solution, the legacy solution will only work for Bash shell-completion and not for other shells. This legacy solution can be used along-side `ValidArgsFunction` and `RegisterFlagCompletionFunc()`, as long as both solutions are not used for the same command. This provides a path to gradually migrate from the legacy solution to the new solution. +For backward compatibility, Cobra still supports its legacy dynamic completion solution (described below). Unlike the `ValidArgsFunction` solution, the legacy solution will only work for Bash shell-completion and not for other shells. This legacy solution can be used along-side `ValidArgsFunction` and `RegisterFlagCompletionFunc()`, as long as both solutions are not used for the same command. This provides a path to gradually migrate from the legacy solution to the new solution. The legacy solution allows you to inject bash functions into the bash completion script. Those bash functions are responsible for providing the completion choices for your own completions. diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go index d01becc8fa6..d6cbfd71985 100644 --- a/vendor/github.com/spf13/cobra/cobra.go +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -19,6 +19,7 @@ package cobra import ( "fmt" "io" + "os" "reflect" "strconv" "strings" @@ -205,3 +206,17 @@ func stringInSlice(a string, list []string) bool { } return false } + +// CheckErr prints the msg with the prefix 'Error:' and exits with error code 1. If the msg is nil, it does nothing. +func CheckErr(msg interface{}) { + if msg != nil { + fmt.Fprintln(os.Stderr, "Error:", msg) + os.Exit(1) + } +} + +// WriteStringAndCheck writes a string into a buffer, and checks if the error is not nil. +func WriteStringAndCheck(b io.StringWriter, s string) { + _, err := b.WriteString(s) + CheckErr(err) +} diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 77b399e02ee..d6732ad1154 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -84,9 +84,6 @@ type Command struct { // Deprecated defines, if this command is deprecated and should print this string when used. Deprecated string - // Hidden defines, if this command is hidden and should NOT show up in the list of available commands. - Hidden bool - // Annotations are key/value pairs that can be used by applications to identify or // group commands. Annotations map[string]string @@ -126,55 +123,6 @@ type Command struct { // PersistentPostRunE: PersistentPostRun but returns an error. PersistentPostRunE func(cmd *Command, args []string) error - // SilenceErrors is an option to quiet errors down stream. - SilenceErrors bool - - // SilenceUsage is an option to silence usage when an error occurs. - SilenceUsage bool - - // DisableFlagParsing disables the flag parsing. - // If this is true all flags will be passed to the command as arguments. 
- DisableFlagParsing bool - - // DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...") - // will be printed by generating docs for this command. - DisableAutoGenTag bool - - // DisableFlagsInUseLine will disable the addition of [flags] to the usage - // line of a command when printing help or generating docs - DisableFlagsInUseLine bool - - // DisableSuggestions disables the suggestions based on Levenshtein distance - // that go along with 'unknown command' messages. - DisableSuggestions bool - // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions. - // Must be > 0. - SuggestionsMinimumDistance int - - // TraverseChildren parses flags on all parents before executing child command. - TraverseChildren bool - - // FParseErrWhitelist flag parse errors to be ignored - FParseErrWhitelist FParseErrWhitelist - - ctx context.Context - - // commands is the list of commands supported by this program. - commands []*Command - // parent is a parent command for this command. - parent *Command - // Max lengths of commands' string lengths for use in padding. - commandsMaxUseLen int - commandsMaxCommandPathLen int - commandsMaxNameLen int - // commandsAreSorted defines, if command slice are sorted or not. - commandsAreSorted bool - // commandCalledAs is the name or alias value used to call this command. - commandCalledAs struct { - name string - called bool - } - // args is actual args parsed from flags. args []string // flagErrorBuf contains all error messages from pflag. @@ -216,6 +164,60 @@ type Command struct { outWriter io.Writer // errWriter is a writer defined by the user that replaces stderr errWriter io.Writer + + //FParseErrWhitelist flag parse errors to be ignored + FParseErrWhitelist FParseErrWhitelist + + // commandsAreSorted defines, if command slice are sorted or not. + commandsAreSorted bool + // commandCalledAs is the name or alias value used to call this command. + commandCalledAs struct { + name string + called bool + } + + ctx context.Context + + // commands is the list of commands supported by this program. + commands []*Command + // parent is a parent command for this command. + parent *Command + // Max lengths of commands' string lengths for use in padding. + commandsMaxUseLen int + commandsMaxCommandPathLen int + commandsMaxNameLen int + + // TraverseChildren parses flags on all parents before executing child command. + TraverseChildren bool + + // Hidden defines, if this command is hidden and should NOT show up in the list of available commands. + Hidden bool + + // SilenceErrors is an option to quiet errors down stream. + SilenceErrors bool + + // SilenceUsage is an option to silence usage when an error occurs. + SilenceUsage bool + + // DisableFlagParsing disables the flag parsing. + // If this is true all flags will be passed to the command as arguments. + DisableFlagParsing bool + + // DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...") + // will be printed by generating docs for this command. + DisableAutoGenTag bool + + // DisableFlagsInUseLine will disable the addition of [flags] to the usage + // line of a command when printing help or generating docs + DisableFlagsInUseLine bool + + // DisableSuggestions disables the suggestions based on Levenshtein distance + // that go along with 'unknown command' messages. + DisableSuggestions bool + + // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions. + // Must be > 0. 
+ SuggestionsMinimumDistance int } // Context returns underlying command context. If command wasn't @@ -418,7 +420,7 @@ func (c *Command) UsageString() string { c.outWriter = bb c.errWriter = bb - c.Usage() + CheckErr(c.Usage()) // Setting things back to normal c.outWriter = tmpOutput @@ -964,13 +966,13 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { return cmd, nil } - // If root command has SilentErrors flagged, + // If root command has SilenceErrors flagged, // all subcommands should respect it if !cmd.SilenceErrors && !c.SilenceErrors { c.PrintErrln("Error:", err.Error()) } - // If root command has SilentUsage flagged, + // If root command has SilenceUsage flagged, // all subcommands should respect it if !cmd.SilenceUsage && !c.SilenceUsage { c.Println(cmd.UsageString()) @@ -1087,10 +1089,10 @@ Simply type ` + c.Name() + ` help [path to command] for full details.`, cmd, _, e := c.Root().Find(args) if cmd == nil || e != nil { c.Printf("Unknown help topic %#q\n", args) - c.Root().Usage() + CheckErr(c.Root().Usage()) } else { cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown - cmd.Help() + CheckErr(cmd.Help()) } }, } diff --git a/vendor/github.com/spf13/cobra/custom_completions.go b/vendor/github.com/spf13/cobra/custom_completions.go index f9e88e081fc..fa060c147be 100644 --- a/vendor/github.com/spf13/cobra/custom_completions.go +++ b/vendor/github.com/spf13/cobra/custom_completions.go @@ -527,13 +527,13 @@ func CompDebug(msg string, printToStdErr bool) { os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) if err == nil { defer f.Close() - f.WriteString(msg) + WriteStringAndCheck(f, msg) } } if printToStdErr { // Must print to stderr for this not to be read by the completion script. - fmt.Fprintf(os.Stderr, msg) + fmt.Fprint(os.Stderr, msg) } } diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go index eaae9bca866..3e112347d7b 100644 --- a/vendor/github.com/spf13/cobra/fish_completions.go +++ b/vendor/github.com/spf13/cobra/fish_completions.go @@ -8,7 +8,7 @@ import ( "strings" ) -func genFishComp(buf *bytes.Buffer, name string, includeDesc bool) { +func genFishComp(buf io.StringWriter, name string, includeDesc bool) { // Variables should not contain a '-' or ':' character nameForVar := name nameForVar = strings.Replace(nameForVar, "-", "_", -1) @@ -18,8 +18,8 @@ func genFishComp(buf *bytes.Buffer, name string, includeDesc bool) { if !includeDesc { compCmd = ShellCompNoDescRequestCmd } - buf.WriteString(fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name)) - buf.WriteString(fmt.Sprintf(` + WriteStringAndCheck(buf, fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(` function __%[1]s_debug set file "$BASH_COMP_DEBUG_FILE" if test -n "$file" diff --git a/vendor/github.com/spf13/cobra/go.mod b/vendor/github.com/spf13/cobra/go.mod index 57e3244d5e3..ff56144056a 100644 --- a/vendor/github.com/spf13/cobra/go.mod +++ b/vendor/github.com/spf13/cobra/go.mod @@ -8,5 +8,5 @@ require ( github.com/mitchellh/go-homedir v1.1.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.7.0 - gopkg.in/yaml.v2 v2.2.8 + gopkg.in/yaml.v2 v2.4.0 ) diff --git a/vendor/github.com/spf13/cobra/go.sum b/vendor/github.com/spf13/cobra/go.sum index 0aae738631c..9328ee3ee7c 100644 --- a/vendor/github.com/spf13/cobra/go.sum +++ b/vendor/github.com/spf13/cobra/go.sum @@ -304,8 +304,8 @@ gopkg.in/resty.v1 v1.12.0/go.mod 
h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go index 756c61b9dcb..c55be71cd14 100644 --- a/vendor/github.com/spf13/cobra/powershell_completions.go +++ b/vendor/github.com/spf13/cobra/powershell_completions.go @@ -1,6 +1,3 @@ -// PowerShell completions are based on the amazing work from clap: -// https://github.com/clap-rs/clap/blob/3294d18efe5f264d12c9035f404c7d189d4824e1/src/completions/powershell.rs -// // The generated scripts require PowerShell v5.0+ (which comes Windows 10, but // can be downloaded separately for windows 7 or 8.1). @@ -11,90 +8,278 @@ import ( "fmt" "io" "os" - "strings" - - "github.com/spf13/pflag" ) -var powerShellCompletionTemplate = `using namespace System.Management.Automation -using namespace System.Management.Automation.Language -Register-ArgumentCompleter -Native -CommandName '%s' -ScriptBlock { - param($wordToComplete, $commandAst, $cursorPosition) - $commandElements = $commandAst.CommandElements - $command = @( - '%s' - for ($i = 1; $i -lt $commandElements.Count; $i++) { - $element = $commandElements[$i] - if ($element -isnot [StringConstantExpressionAst] -or - $element.StringConstantType -ne [StringConstantType]::BareWord -or - $element.Value.StartsWith('-')) { - break - } - $element.Value - } - ) -join ';' - $completions = @(switch ($command) {%s - }) - $completions.Where{ $_.CompletionText -like "$wordToComplete*" } | - Sort-Object -Property ListItemText -}` - -func generatePowerShellSubcommandCases(out io.Writer, cmd *Command, previousCommandName string) { - var cmdName string - if previousCommandName == "" { - cmdName = cmd.Name() - } else { - cmdName = fmt.Sprintf("%s;%s", previousCommandName, cmd.Name()) - } - - fmt.Fprintf(out, "\n '%s' {", cmdName) - - cmd.Flags().VisitAll(func(flag *pflag.Flag) { - if nonCompletableFlag(flag) { - return - } - usage := escapeStringForPowerShell(flag.Usage) - if len(flag.Shorthand) > 0 { - fmt.Fprintf(out, "\n [CompletionResult]::new('-%s', '%s', [CompletionResultType]::ParameterName, '%s')", flag.Shorthand, flag.Shorthand, usage) - } - fmt.Fprintf(out, "\n [CompletionResult]::new('--%s', '%s', [CompletionResultType]::ParameterName, '%s')", flag.Name, flag.Name, usage) - }) - - for _, subCmd := range cmd.Commands() { - usage := escapeStringForPowerShell(subCmd.Short) - fmt.Fprintf(out, "\n [CompletionResult]::new('%s', '%s', [CompletionResultType]::ParameterValue, '%s')", subCmd.Name(), subCmd.Name(), usage) +func genPowerShellComp(buf io.StringWriter, name string, includeDesc bool) { + compCmd := 
ShellCompRequestCmd + if !includeDesc { + compCmd = ShellCompNoDescRequestCmd } + WriteStringAndCheck(buf, fmt.Sprintf(`# powershell completion for %-36[1]s -*- shell-script -*- - fmt.Fprint(out, "\n break\n }") - - for _, subCmd := range cmd.Commands() { - generatePowerShellSubcommandCases(out, subCmd, cmdName) - } +function __%[1]s_debug { + if ($env:BASH_COMP_DEBUG_FILE) { + "$args" | Out-File -Append -FilePath "$env:BASH_COMP_DEBUG_FILE" + } } -func escapeStringForPowerShell(s string) string { - return strings.Replace(s, "'", "''", -1) +filter __%[1]s_escapeStringWithSpecialChars { +`+" $_ -replace '\\s|#|@|\\$|;|,|''|\\{|\\}|\\(|\\)|\"|`|\\||<|>|&','`$&'"+` } -// GenPowerShellCompletion generates PowerShell completion file and writes to the passed writer. -func (c *Command) GenPowerShellCompletion(w io.Writer) error { - buf := new(bytes.Buffer) +Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock { + param( + $WordToComplete, + $CommandAst, + $CursorPosition + ) + + # Get the current command line and convert into a string + $Command = $CommandAst.CommandElements + $Command = "$Command" + + __%[1]s_debug "" + __%[1]s_debug "========= starting completion logic ==========" + __%[1]s_debug "WordToComplete: $WordToComplete Command: $Command CursorPosition: $CursorPosition" + + # The user could have moved the cursor backwards on the command-line. + # We need to trigger completion from the $CursorPosition location, so we need + # to truncate the command-line ($Command) up to the $CursorPosition location. + # Make sure the $Command is longer then the $CursorPosition before we truncate. + # This happens because the $Command does not include the last space. + if ($Command.Length -gt $CursorPosition) { + $Command=$Command.Substring(0,$CursorPosition) + } + __%[1]s_debug "Truncated command: $Command" + + $ShellCompDirectiveError=%[3]d + $ShellCompDirectiveNoSpace=%[4]d + $ShellCompDirectiveNoFileComp=%[5]d + $ShellCompDirectiveFilterFileExt=%[6]d + $ShellCompDirectiveFilterDirs=%[7]d + + # Prepare the command to request completions for the program. + # Split the command at the first space to separate the program and arguments. + $Program,$Arguments = $Command.Split(" ",2) + $RequestComp="$Program %[2]s $Arguments" + __%[1]s_debug "RequestComp: $RequestComp" + + # we cannot use $WordToComplete because it + # has the wrong values if the cursor was moved + # so use the last argument + if ($WordToComplete -ne "" ) { + $WordToComplete = $Arguments.Split(" ")[-1] + } + __%[1]s_debug "New WordToComplete: $WordToComplete" + + + # Check for flag with equal sign + $IsEqualFlag = ($WordToComplete -Like "--*=*" ) + if ( $IsEqualFlag ) { + __%[1]s_debug "Completing equal sign flag" + # Remove the flag part + $Flag,$WordToComplete = $WordToComplete.Split("=",2) + } + + if ( $WordToComplete -eq "" -And ( -Not $IsEqualFlag )) { + # If the last parameter is complete (there is a space following it) + # We add an extra empty parameter so we can indicate this to the go method. 
+ __%[1]s_debug "Adding extra empty parameter" +`+" # We need to use `\"`\" to pass an empty argument a \"\" or '' does not work!!!"+` +`+" $RequestComp=\"$RequestComp\" + ' `\"`\"' "+` + } + + __%[1]s_debug "Calling $RequestComp" + #call the command store the output in $out and redirect stderr and stdout to null + # $Out is an array contains each line per element + Invoke-Expression -OutVariable out "$RequestComp" 2>&1 | Out-Null + + + # get directive from last line + [int]$Directive = $Out[-1].TrimStart(':') + if ($Directive -eq "") { + # There is no directive specified + $Directive = 0 + } + __%[1]s_debug "The completion directive is: $Directive" + + # remove directive (last element) from out + $Out = $Out | Where-Object { $_ -ne $Out[-1] } + __%[1]s_debug "The completions are: $Out" + + if (($Directive -band $ShellCompDirectiveError) -ne 0 ) { + # Error code. No completion. + __%[1]s_debug "Received error from custom completion go code" + return + } + + $Longest = 0 + $Values = $Out | ForEach-Object { + #Split the output in name and description +`+" $Name, $Description = $_.Split(\"`t\",2)"+` + __%[1]s_debug "Name: $Name Description: $Description" + + # Look for the longest completion so that we can format things nicely + if ($Longest -lt $Name.Length) { + $Longest = $Name.Length + } + + # Set the description to a one space string if there is none set. + # This is needed because the CompletionResult does not accept an empty string as argument + if (-Not $Description) { + $Description = " " + } + @{Name="$Name";Description="$Description"} + } + + + $Space = " " + if (($Directive -band $ShellCompDirectiveNoSpace) -ne 0 ) { + # remove the space here + __%[1]s_debug "ShellCompDirectiveNoSpace is called" + $Space = "" + } + + if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) { + __%[1]s_debug "ShellCompDirectiveNoFileComp is called" + + if ($Values.Length -eq 0) { + # Just print an empty string here so the + # shell does not start to complete paths. + # We cannot use CompletionResult here because + # it does not accept an empty string as argument. 
+ "" + return + } + } + + if ((($Directive -band $ShellCompDirectiveFilterFileExt) -ne 0 ) -or + (($Directive -band $ShellCompDirectiveFilterDirs) -ne 0 )) { + __%[1]s_debug "ShellCompDirectiveFilterFileExt ShellCompDirectiveFilterDirs are not supported" + + # return here to prevent the completion of the extensions + return + } - var subCommandCases bytes.Buffer - generatePowerShellSubcommandCases(&subCommandCases, c, "") - fmt.Fprintf(buf, powerShellCompletionTemplate, c.Name(), c.Name(), subCommandCases.String()) + $Values = $Values | Where-Object { + # filter the result + $_.Name -like "$WordToComplete*" + # Join the flag back if we have a equal sign flag + if ( $IsEqualFlag ) { + __%[1]s_debug "Join the equal sign flag back to the completion value" + $_.Name = $Flag + "=" + $_.Name + } + } + + # Get the current mode + $Mode = (Get-PSReadLineKeyHandler | Where-Object {$_.Key -eq "Tab" }).Function + __%[1]s_debug "Mode: $Mode" + + $Values | ForEach-Object { + + # store temporay because switch will overwrite $_ + $comp = $_ + + # PowerShell supports three different completion modes + # - TabCompleteNext (default windows style - on each key press the next option is displayed) + # - Complete (works like bash) + # - MenuComplete (works like zsh) + # You set the mode with Set-PSReadLineKeyHandler -Key Tab -Function + + # CompletionResult Arguments: + # 1) CompletionText text to be used as the auto completion result + # 2) ListItemText text to be displayed in the suggestion list + # 3) ResultType type of completion result + # 4) ToolTip text for the tooltip with details about the object + + switch ($Mode) { + + # bash like + "Complete" { + + if ($Values.Length -eq 1) { + __%[1]s_debug "Only one completion left" + + # insert space after value + [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + + } else { + # Add the proper number of spaces to align the descriptions + while($comp.Name.Length -lt $Longest) { + $comp.Name = $comp.Name + " " + } + + # Check for empty description and only add parentheses if needed + if ($($comp.Description) -eq " " ) { + $Description = "" + } else { + $Description = " ($($comp.Description))" + } + + [System.Management.Automation.CompletionResult]::new("$($comp.Name)$Description", "$($comp.Name)$Description", 'ParameterValue', "$($comp.Description)") + } + } + + # zsh like + "MenuComplete" { + # insert space after value + # MenuComplete will automatically show the ToolTip of + # the highlighted value at the bottom of the suggestions. + [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + } + + # TabCompleteNext and in case we get something unknown + Default { + # Like MenuComplete but we don't want to add a space here because + # the user need to press space anyway to get the completion. 
+ # Description will not be shown because thats not possible with TabCompleteNext + [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars), "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + } + } + + } +} +`, name, compCmd, + ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs)) +} + +func (c *Command) genPowerShellCompletion(w io.Writer, includeDesc bool) error { + buf := new(bytes.Buffer) + genPowerShellComp(buf, c.Name(), includeDesc) _, err := buf.WriteTo(w) return err } -// GenPowerShellCompletionFile generates PowerShell completion file. -func (c *Command) GenPowerShellCompletionFile(filename string) error { +func (c *Command) genPowerShellCompletionFile(filename string, includeDesc bool) error { outFile, err := os.Create(filename) if err != nil { return err } defer outFile.Close() - return c.GenPowerShellCompletion(outFile) + return c.genPowerShellCompletion(outFile, includeDesc) +} + +// GenPowerShellCompletionFile generates powershell completion file without descriptions. +func (c *Command) GenPowerShellCompletionFile(filename string) error { + return c.genPowerShellCompletionFile(filename, false) +} + +// GenPowerShellCompletion generates powershell completion file without descriptions +// and writes it to the passed writer. +func (c *Command) GenPowerShellCompletion(w io.Writer) error { + return c.genPowerShellCompletion(w, false) +} + +// GenPowerShellCompletionFileWithDesc generates powershell completion file with descriptions. +func (c *Command) GenPowerShellCompletionFileWithDesc(filename string) error { + return c.genPowerShellCompletionFile(filename, true) +} + +// GenPowerShellCompletionWithDesc generates powershell completion file with descriptions +// and writes it to the passed writer. +func (c *Command) GenPowerShellCompletionWithDesc(w io.Writer) error { + return c.genPowerShellCompletion(w, true) } diff --git a/vendor/github.com/spf13/cobra/powershell_completions.md b/vendor/github.com/spf13/cobra/powershell_completions.md index 55f154a68fc..c449f1e5c0f 100644 --- a/vendor/github.com/spf13/cobra/powershell_completions.md +++ b/vendor/github.com/spf13/cobra/powershell_completions.md @@ -1,16 +1,3 @@ # Generating PowerShell Completions For Your Own cobra.Command -Cobra can generate PowerShell completion scripts. Users need PowerShell version 5.0 or above, which comes with Windows 10 and can be downloaded separately for Windows 7 or 8.1. They can then write the completions to a file and source this file from their PowerShell profile, which is referenced by the `$Profile` environment variable. See `Get-Help about_Profiles` for more info about PowerShell profiles. - -*Note*: PowerShell completions have not (yet?) been aligned to Cobra's generic shell completion support. This implies the PowerShell completions are not as rich as for other shells (see [What's not yet supported](#whats-not-yet-supported)), and may behave slightly differently. They are still very useful for PowerShell users. - -# What's supported - -- Completion for subcommands using their `.Short` description -- Completion for non-hidden flags using their `.Name` and `.Shorthand` - -# What's not yet supported - -- Command aliases -- Required, filename or custom flags (they will work like normal flags) -- Custom completion scripts +Please refer to [Shell Completions](shell_completions.md#powershell-completions) for details. 
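The cobra bump above replaces the old template-based PowerShell generator with the generic ShellComp protocol and adds `GenPowerShellCompletion(File)WithDesc` alongside the existing no-description variants, plus the `cobra.CheckErr` helper. A minimal consumer-side sketch, assuming a hypothetical root command named `yourprogram` (only the cobra API names come from the diff; everything else is illustrative):

```go
package main

import (
	"os"

	"github.com/spf13/cobra"
)

func main() {
	// Hypothetical root command standing in for a real application.
	rootCmd := &cobra.Command{Use: "yourprogram"}

	// Emit the PowerShell completion script to stdout, including
	// descriptions (the *WithDesc variant added in this cobra version).
	// cobra.CheckErr prints "Error: ..." and exits with code 1 on a
	// non-nil value, so generation failures are not silently dropped.
	cobra.CheckErr(rootCmd.GenPowerShellCompletionWithDesc(os.Stdout))
}
```

The generated script supports the `TabCompleteNext`, `Complete` and `MenuComplete` PSReadLine modes; descriptions are only rendered in the latter two, as described in the shell_completions.md hunk further below.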
diff --git a/vendor/github.com/spf13/cobra/projects_using_cobra.md b/vendor/github.com/spf13/cobra/projects_using_cobra.md index 31c272036a9..d98a71e36f9 100644 --- a/vendor/github.com/spf13/cobra/projects_using_cobra.md +++ b/vendor/github.com/spf13/cobra/projects_using_cobra.md @@ -25,6 +25,8 @@ - [Moby (former Docker)](https://github.com/moby/moby) - [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) - [OpenShift](https://www.openshift.com/) +- [Ory Hydra](https://github.com/ory/hydra) +- [Ory Kratos](https://github.com/ory/kratos) - [Pouch](https://github.com/alibaba/pouch) - [ProjectAtomic (enterprise)](http://www.projectatomic.io/) - [Prototool](https://github.com/uber/prototool) @@ -32,4 +34,5 @@ - [Rclone](https://rclone.org/) - [Skaffold](https://skaffold.dev/) - [Tendermint](https://github.com/tendermint/tendermint) +- [Twitch CLI](https://github.com/twitchdev/twitch-cli) - [Werf](https://werf.io/) diff --git a/vendor/github.com/spf13/cobra/shell_completions.md b/vendor/github.com/spf13/cobra/shell_completions.md index d8416ab1dc9..cd533ac3d44 100644 --- a/vendor/github.com/spf13/cobra/shell_completions.md +++ b/vendor/github.com/spf13/cobra/shell_completions.md @@ -4,10 +4,10 @@ Cobra can generate shell completions for multiple shells. The currently supported shells are: - Bash - Zsh -- Fish +- fish - PowerShell -If you are using the generator you can create a completion command by running +If you are using the generator, you can create a completion command by running ```bash cobra add completion @@ -17,38 +17,46 @@ and then modifying the generated `cmd/completion.go` file to look something like ```go var completionCmd = &cobra.Command{ - Use: "completion [bash|zsh|fish|powershell]", - Short: "Generate completion script", + Use: "completion [bash|zsh|fish|powershell]", + Short: "Generate completion script", Long: `To load completions: Bash: -$ source <(yourprogram completion bash) + $ source <(yourprogram completion bash) -# To load completions for each session, execute once: -Linux: + # To load completions for each session, execute once: + # Linux: $ yourprogram completion bash > /etc/bash_completion.d/yourprogram -MacOS: + # macOS: $ yourprogram completion bash > /usr/local/etc/bash_completion.d/yourprogram Zsh: -# If shell completion is not already enabled in your environment you will need -# to enable it. You can execute the following once: + # If shell completion is not already enabled in your environment, + # you will need to enable it. You can execute the following once: -$ echo "autoload -U compinit; compinit" >> ~/.zshrc + $ echo "autoload -U compinit; compinit" >> ~/.zshrc -# To load completions for each session, execute once: -$ yourprogram completion zsh > "${fpath[1]}/_yourprogram" + # To load completions for each session, execute once: + $ yourprogram completion zsh > "${fpath[1]}/_yourprogram" -# You will need to start a new shell for this setup to take effect. + # You will need to start a new shell for this setup to take effect. 
-Fish: +fish: -$ yourprogram completion fish | source + $ yourprogram completion fish | source -# To load completions for each session, execute once: -$ yourprogram completion fish > ~/.config/fish/completions/yourprogram.fish + # To load completions for each session, execute once: + $ yourprogram completion fish > ~/.config/fish/completions/yourprogram.fish + +PowerShell: + + PS> yourprogram completion powershell | Out-String | Invoke-Expression + + # To load completions for every new session, run: + PS> yourprogram completion powershell > yourprogram.ps1 + # and source this file from your PowerShell profile. `, DisableFlagsInUseLine: true, ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, @@ -68,7 +76,7 @@ $ yourprogram completion fish > ~/.config/fish/completions/yourprogram.fish } ``` -**Note:** The cobra generator may include messages printed to stdout for example if the config file is loaded, this will break the auto complete script so must be removed. +**Note:** The cobra generator may include messages printed to stdout, for example, if the config file is loaded; this will break the auto-completion script so must be removed. # Customizing completions @@ -91,8 +99,7 @@ cmd := &cobra.Command{ Long: get_long, Example: get_example, Run: func(cmd *cobra.Command, args []string) { - err := RunGet(f, out, cmd, args) - util.CheckErr(err) + cobra.CheckErr(RunGet(f, out, cmd, args)) }, ValidArgs: validArgs, } @@ -124,7 +131,7 @@ the completion algorithm if entered manually, e.g. in: ```bash $ kubectl get rc [tab][tab] -backend frontend database +backend frontend database ``` Note that without declaring `rc` as an alias, the completion algorithm would not know to show the list of @@ -246,7 +253,7 @@ and you'll get something like ```bash $ kubectl exec [tab][tab] --c --container= -p --pod= +-c --container= -p --pod= ``` ### Specify dynamic flag completion @@ -316,7 +323,7 @@ cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, ``` ### Descriptions for completions -Both `zsh` and `fish` allow for descriptions to annotate completion choices. For commands and flags, Cobra will provide the descriptions automatically, based on usage information. For example, using zsh: +`zsh`, `fish` and `powershell` allow for descriptions to annotate completion choices. For commands and flags, Cobra will provide the descriptions automatically, based on usage information. For example, using zsh: ``` $ helm s[tab] search -- search for a keyword in charts @@ -361,12 +368,12 @@ completion firstcommand secondcommand ``` ### Bash legacy dynamic completions -For backwards-compatibility, Cobra still supports its bash legacy dynamic completion solution. +For backward compatibility, Cobra still supports its bash legacy dynamic completion solution. Please refer to [Bash Completions](bash_completions.md) for details. ## Zsh completions -Cobra supports native Zsh completion generated from the root `cobra.Command`. +Cobra supports native zsh completion generated from the root `cobra.Command`. The generated completion script should be put somewhere in your `$fpath` and be named `_`. You will need to start a new shell for the completions to become available. @@ -385,23 +392,23 @@ status -- displays the status of the named release $ helm s[tab] search show status ``` -*Note*: Because of backwards-compatibility requirements, we were forced to have a different API to disable completion descriptions between `Zsh` and `Fish`. 
+*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`. ### Limitations * Custom completions implemented in Bash scripting (legacy) are not supported and will be ignored for `zsh` (including the use of the `BashCompCustom` flag annotation). - * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`). + * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`). * The function `MarkFlagCustom()` is not supported and will be ignored for `zsh`. * You should instead use `RegisterFlagCompletionFunc()`. ### Zsh completions standardization -Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backwards-compatible, some small changes in behavior were introduced. +Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backward-compatible, some small changes in behavior were introduced. Please refer to [Zsh Completions](zsh_completions.md) for details. -## Fish completions +## fish completions -Cobra supports native Fish completions generated from the root `cobra.Command`. You can use the `command.GenFishCompletion()` or `command.GenFishCompletionFile()` functions. You must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users. +Cobra supports native fish completions generated from the root `cobra.Command`. You can use the `command.GenFishCompletion()` or `command.GenFishCompletionFile()` functions. You must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users. ``` # With descriptions $ helm s[tab] @@ -411,12 +418,12 @@ search (search for a keyword in charts) show (show information of a chart) s $ helm s[tab] search show status ``` -*Note*: Because of backwards-compatibility requirements, we were forced to have a different API to disable completion descriptions between `Zsh` and `Fish`. +*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`. ### Limitations -* Custom completions implemented in Bash scripting (legacy) are not supported and will be ignored for `fish` (including the use of the `BashCompCustom` flag annotation). - * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`). +* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `fish` (including the use of the `BashCompCustom` flag annotation). + * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`). * The function `MarkFlagCustom()` is not supported and will be ignored for `fish`. * You should instead use `RegisterFlagCompletionFunc()`. 
* The following flag completion annotations are not supported and will be ignored for `fish`: @@ -431,4 +438,46 @@ search show status ## PowerShell completions -Please refer to [PowerShell Completions](powershell_completions.md) for details. +Cobra supports native PowerShell completions generated from the root `cobra.Command`. You can use the `command.GenPowerShellCompletion()` or `command.GenPowerShellCompletionFile()` functions. To include descriptions use `command.GenPowerShellCompletionWithDesc()` and `command.GenPowerShellCompletionFileWithDesc()`. Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users. + +The script is designed to support all three PowerShell completion modes: + +* TabCompleteNext (default windows style - on each key press the next option is displayed) +* Complete (works like bash) +* MenuComplete (works like zsh) + +You set the mode with `Set-PSReadLineKeyHandler -Key Tab -Function `. Descriptions are only displayed when using the `Complete` or `MenuComplete` mode. + +Users need PowerShell version 5.0 or above, which comes with Windows 10 and can be downloaded separately for Windows 7 or 8.1. They can then write the completions to a file and source this file from their PowerShell profile, which is referenced by the `$Profile` environment variable. See `Get-Help about_Profiles` for more info about PowerShell profiles. + +``` +# With descriptions and Mode 'Complete' +$ helm s[tab] +search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release) + +# With descriptions and Mode 'MenuComplete' The description of the current selected value will be displayed below the suggestions. +$ helm s[tab] +search show status + +search for a keyword in charts + +# Without descriptions +$ helm s[tab] +search show status +``` + +### Limitations + +* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `powershell` (including the use of the `BashCompCustom` flag annotation). + * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`). +* The function `MarkFlagCustom()` is not supported and will be ignored for `powershell`. + * You should instead use `RegisterFlagCompletionFunc()`. 
+* The following flag completion annotations are not supported and will be ignored for `powershell`: + * `BashCompFilenameExt` (filtering by file extension) + * `BashCompSubdirsInDir` (filtering by directory) +* The functions corresponding to the above annotations are consequently not supported and will be ignored for `powershell`: + * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension) + * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory) +* Similarly, the following completion directives are not supported and will be ignored for `powershell`: + * `ShellCompDirectiveFilterFileExt` (filtering by file extension) + * `ShellCompDirectiveFilterDirs` (filtering by directory) diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go index 92a70394a9d..2e840285f38 100644 --- a/vendor/github.com/spf13/cobra/zsh_completions.go +++ b/vendor/github.com/spf13/cobra/zsh_completions.go @@ -70,12 +70,12 @@ func (c *Command) genZshCompletion(w io.Writer, includeDesc bool) error { return err } -func genZshComp(buf *bytes.Buffer, name string, includeDesc bool) { +func genZshComp(buf io.StringWriter, name string, includeDesc bool) { compCmd := ShellCompRequestCmd if !includeDesc { compCmd = ShellCompNoDescRequestCmd } - buf.WriteString(fmt.Sprintf(`#compdef _%[1]s %[1]s + WriteStringAndCheck(buf, fmt.Sprintf(`#compdef _%[1]s %[1]s # zsh completion for %-36[1]s -*- shell-script -*- diff --git a/vendor/github.com/vbauerster/mpb/v5/go.mod b/vendor/github.com/vbauerster/mpb/v5/go.mod deleted file mode 100644 index e80d1a10d4f..00000000000 --- a/vendor/github.com/vbauerster/mpb/v5/go.mod +++ /dev/null @@ -1,10 +0,0 @@ -module github.com/vbauerster/mpb/v5 - -require ( - github.com/VividCortex/ewma v1.1.1 - github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d - github.com/mattn/go-runewidth v0.0.9 - golang.org/x/sys v0.0.0-20201218084310-7d0127a74742 -) - -go 1.14 diff --git a/vendor/github.com/vbauerster/mpb/v5/go.sum b/vendor/github.com/vbauerster/mpb/v5/go.sum deleted file mode 100644 index 62cc10af09a..00000000000 --- a/vendor/github.com/vbauerster/mpb/v5/go.sum +++ /dev/null @@ -1,8 +0,0 @@ -github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM= -github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= -github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= -github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= -github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -golang.org/x/sys v0.0.0-20201218084310-7d0127a74742 h1:+CBz4km/0KPU3RGTwARGh/noP3bEwtHcq+0YcBQM2JQ= -golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/vbauerster/mpb/v5/internal/width.go b/vendor/github.com/vbauerster/mpb/v5/internal/width.go deleted file mode 100644 index 35d52898390..00000000000 --- a/vendor/github.com/vbauerster/mpb/v5/internal/width.go +++ /dev/null @@ -1,8 +0,0 @@ -package internal - -func WidthForBarFiller(reqWidth, available int) int { - if reqWidth <= 0 || reqWidth >= available { - return available - } - return reqWidth -} diff --git a/vendor/github.com/vbauerster/mpb/v5/.gitignore 
b/vendor/github.com/vbauerster/mpb/v6/.gitignore similarity index 100% rename from vendor/github.com/vbauerster/mpb/v5/.gitignore rename to vendor/github.com/vbauerster/mpb/v6/.gitignore diff --git a/vendor/github.com/vbauerster/mpb/v5/.travis.yml b/vendor/github.com/vbauerster/mpb/v6/.travis.yml similarity index 100% rename from vendor/github.com/vbauerster/mpb/v5/.travis.yml rename to vendor/github.com/vbauerster/mpb/v6/.travis.yml diff --git a/vendor/github.com/vbauerster/mpb/v5/README.md b/vendor/github.com/vbauerster/mpb/v6/README.md similarity index 83% rename from vendor/github.com/vbauerster/mpb/v5/README.md rename to vendor/github.com/vbauerster/mpb/v6/README.md index bfb0c4d1805..a87786d726a 100644 --- a/vendor/github.com/vbauerster/mpb/v5/README.md +++ b/vendor/github.com/vbauerster/mpb/v6/README.md @@ -1,6 +1,6 @@ # Multi Progress Bar -[![GoDoc](https://godoc.org/github.com/vbauerster/mpb?status.svg)](https://godoc.org/github.com/vbauerster/mpb) +[![GoDoc](https://pkg.go.dev/badge/github.com/vbauerster/mpb)](https://pkg.go.dev/github.com/vbauerster/mpb/v6) [![Build Status](https://travis-ci.org/vbauerster/mpb.svg?branch=master)](https://travis-ci.org/vbauerster/mpb) [![Go Report Card](https://goreportcard.com/badge/github.com/vbauerster/mpb)](https://goreportcard.com/report/github.com/vbauerster/mpb) @@ -8,16 +8,17 @@ ## Features -* __Multiple Bars__: Multiple progress bars are supported -* __Dynamic Total__: Set total while bar is running -* __Dynamic Add/Remove__: Dynamically add or remove bars -* __Cancellation__: Cancel whole rendering process -* __Predefined Decorators__: Elapsed time, [ewma](https://github.com/VividCortex/ewma) based ETA, Percentage, Bytes counter -* __Decorator's width sync__: Synchronized decorator's width among multiple bars +- **Multiple Bars**: Multiple progress bars are supported +- **Dynamic Total**: Set total while bar is running +- **Dynamic Add/Remove**: Dynamically add or remove bars +- **Cancellation**: Cancel whole rendering process +- **Predefined Decorators**: Elapsed time, [ewma](https://github.com/VividCortex/ewma) based ETA, Percentage, Bytes counter +- **Decorator's width sync**: Synchronized decorator's width among multiple bars ## Usage #### [Rendering single bar](_examples/singleBar/main.go) + ```go package main @@ -25,8 +26,8 @@ import ( "math/rand" "time" - "github.com/vbauerster/mpb/v5" - "github.com/vbauerster/mpb/v5/decor" + "github.com/vbauerster/mpb/v6" + "github.com/vbauerster/mpb/v6/decor" ) func main() { @@ -36,9 +37,9 @@ func main() { total := 100 name := "Single Bar:" // adding a single bar, which will inherit container's width - bar := p.AddBar(int64(total), - // override DefaultBarStyle, which is "[=>-]<+" - mpb.BarStyle("╢▌▌░╟"), + bar := p.Add(int64(total), + // progress bar filler with customized style + mpb.NewBarFiller("╢▌▌░╟"), mpb.PrependDecorators( // display our name with one space on the right decor.Name(name, decor.WC{W: len(name) + 1, C: decor.DidentRight}), @@ -61,6 +62,7 @@ func main() { ``` #### [Rendering multiple bars](_examples/multiBars/main.go) + ```go var wg sync.WaitGroup // pass &wg (optional), so p will wait for it eventually diff --git a/vendor/github.com/vbauerster/mpb/v5/UNLICENSE b/vendor/github.com/vbauerster/mpb/v6/UNLICENSE similarity index 100% rename from vendor/github.com/vbauerster/mpb/v5/UNLICENSE rename to vendor/github.com/vbauerster/mpb/v6/UNLICENSE diff --git a/vendor/github.com/vbauerster/mpb/v5/bar.go b/vendor/github.com/vbauerster/mpb/v6/bar.go similarity index 91% rename from 
vendor/github.com/vbauerster/mpb/v5/bar.go rename to vendor/github.com/vbauerster/mpb/v6/bar.go index 358cb048d3e..f18ef967faa 100644 --- a/vendor/github.com/vbauerster/mpb/v5/bar.go +++ b/vendor/github.com/vbauerster/mpb/v6/bar.go @@ -12,10 +12,10 @@ import ( "github.com/acarl005/stripansi" "github.com/mattn/go-runewidth" - "github.com/vbauerster/mpb/v5/decor" + "github.com/vbauerster/mpb/v6/decor" ) -// Bar represents a progress Bar. +// Bar represents a progress bar. type Bar struct { priority int // used by heap index int // used by heap @@ -42,8 +42,10 @@ type Bar struct { recoveredPanic interface{} } -type extFunc func(in io.Reader, reqWidth int, st decor.Statistics) (out io.Reader, lines int) +type extenderFunc func(in io.Reader, reqWidth int, st decor.Statistics) (out io.Reader, lines int) +// bState is actual bar state. It gets passed to *Bar.serve(...) monitor +// goroutine. type bState struct { id int priority int @@ -54,9 +56,9 @@ type bState struct { lastN int64 iterated bool trimSpace bool - toComplete bool + completed bool completeFlushed bool - ignoreComplete bool + triggerComplete bool dropOnComplete bool noPop bool aDecorators []decor.Decorator @@ -67,7 +69,7 @@ type bState struct { bufP, bufB, bufA *bytes.Buffer filler BarFiller middleware func(BarFiller) BarFiller - extender extFunc + extender extenderFunc // runningBar is a key for *pState.parkedBars runningBar *Bar @@ -128,9 +130,10 @@ func (b *Bar) Current() int64 { } } -// SetRefill fills bar with refill rune up to amount argument. -// Given default bar style is "[=>-]<+", refill rune is '+'. -// To set bar style use mpb.BarStyle(string) BarOption. +// SetRefill sets refill flag with specified amount. +// The underlying BarFiller will change its visual representation, to +// indicate refill event. Refill event may be referred to some retry +// operation for example. func (b *Bar) SetRefill(amount int64) { select { case b.operateState <- func(s *bState) { @@ -159,19 +162,18 @@ func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) { // SetTotal sets total dynamically. // If total is less than or equal to zero it takes progress' current value. -// A complete flag enables or disables complete event on `current >= total`. -func (b *Bar) SetTotal(total int64, complete bool) { +func (b *Bar) SetTotal(total int64, triggerComplete bool) { select { case b.operateState <- func(s *bState) { - s.ignoreComplete = !complete + s.triggerComplete = triggerComplete if total <= 0 { s.total = s.current } else { s.total = total } - if !s.ignoreComplete && !s.toComplete { + if s.triggerComplete && !s.completed { s.current = s.total - s.toComplete = true + s.completed = true go b.refreshTillShutdown() } }: @@ -187,9 +189,9 @@ func (b *Bar) SetCurrent(current int64) { s.iterated = true s.lastN = current - s.current s.current = current - if !s.ignoreComplete && s.current >= s.total { + if s.triggerComplete && s.current >= s.total { s.current = s.total - s.toComplete = true + s.completed = true go b.refreshTillShutdown() } }: @@ -214,9 +216,9 @@ func (b *Bar) IncrInt64(n int64) { s.iterated = true s.lastN = n s.current += n - if !s.ignoreComplete && s.current >= s.total { + if s.triggerComplete && s.current >= s.total { s.current = s.total - s.toComplete = true + s.completed = true go b.refreshTillShutdown() } }: @@ -280,7 +282,7 @@ func (b *Bar) Abort(drop bool) { // Completed reports whether the bar is in completed state. 
func (b *Bar) Completed() bool { select { - case b.operateState <- func(s *bState) { b.completed <- s.toComplete }: + case b.operateState <- func(s *bState) { b.completed <- s.completed }: return <-b.completed case <-b.done: return true @@ -322,11 +324,11 @@ func (b *Bar) render(tw int) { b.frameCh <- frame b.dlogger.Println(p) } - s.completeFlushed = s.toComplete + s.completeFlushed = s.completed }() frame, lines := s.extender(s.draw(stat), s.reqWidth, stat) b.extendedLines = lines - b.toShutdown = s.toComplete && !s.completeFlushed + b.toShutdown = s.completed && !s.completeFlushed b.frameCh <- frame }: case <-b.done: @@ -475,7 +477,7 @@ func ewmaIterationUpdate(done bool, s *bState, dur time.Duration) { } } -func makePanicExtender(p interface{}) extFunc { +func makePanicExtender(p interface{}) extenderFunc { pstr := fmt.Sprint(p) stack := debug.Stack() stackLines := bytes.Count(stack, []byte("\n")) diff --git a/vendor/github.com/vbauerster/mpb/v5/bar_filler.go b/vendor/github.com/vbauerster/mpb/v6/bar_filler.go similarity index 66% rename from vendor/github.com/vbauerster/mpb/v5/bar_filler.go rename to vendor/github.com/vbauerster/mpb/v6/bar_filler.go index 07148bffbc6..c8cedaa4a02 100644 --- a/vendor/github.com/vbauerster/mpb/v5/bar_filler.go +++ b/vendor/github.com/vbauerster/mpb/v6/bar_filler.go @@ -3,19 +3,20 @@ package mpb import ( "io" - "github.com/vbauerster/mpb/v5/decor" + "github.com/vbauerster/mpb/v6/decor" ) // BarFiller interface. // Bar (without decorators) renders itself by calling BarFiller's Fill method. // -// `reqWidth` is requested width, which is set via: -// func WithWidth(width int) ContainerOption -// func BarWidth(width int) BarOption +// reqWidth is requested width, set by `func WithWidth(int) ContainerOption`. +// If not set, it defaults to terminal width. // // Default implementations can be obtained via: // -// func NewBarFiller(style string, reverse bool) BarFiller +// func NewBarFiller(style string) BarFiller +// func NewBarFillerRev(style string) BarFiller +// func NewBarFillerPick(style string, rev bool) BarFiller // func NewSpinnerFiller(style []string, alignment SpinnerAlignment) BarFiller // type BarFiller interface { diff --git a/vendor/github.com/vbauerster/mpb/v5/bar_filler_bar.go b/vendor/github.com/vbauerster/mpb/v6/bar_filler_bar.go similarity index 55% rename from vendor/github.com/vbauerster/mpb/v5/bar_filler_bar.go rename to vendor/github.com/vbauerster/mpb/v6/bar_filler_bar.go index 637bd88ca17..1c339e91d42 100644 --- a/vendor/github.com/vbauerster/mpb/v5/bar_filler_bar.go +++ b/vendor/github.com/vbauerster/mpb/v6/bar_filler_bar.go @@ -6,8 +6,9 @@ import ( "unicode/utf8" "github.com/mattn/go-runewidth" - "github.com/vbauerster/mpb/v5/decor" - "github.com/vbauerster/mpb/v5/internal" + "github.com/rivo/uniseg" + "github.com/vbauerster/mpb/v6/decor" + "github.com/vbauerster/mpb/v6/internal" ) const ( @@ -20,8 +21,8 @@ const ( rRefill ) -// DefaultBarStyle is a string containing 7 runes. -// Each rune is a building block of a progress bar. +// BarDefaultStyle is a style for rendering a progress bar. +// It consist of 7 ordered runes: // // '1st rune' stands for left boundary rune // @@ -37,7 +38,7 @@ const ( // // '7th rune' stands for refill rune // -const DefaultBarStyle string = "[=>-]<+" +const BarDefaultStyle string = "[=>-]<+" type barFiller struct { format [][]byte @@ -54,55 +55,72 @@ type space struct { count int } -// NewBarFiller constucts mpb.BarFiller, to be used with *Progress.Add(...) *Bar method. 
-func NewBarFiller(style string, reverse bool) BarFiller { +// NewBarFiller returns a BarFiller implementation which renders a +// progress bar in regular direction. If style is empty string, +// BarDefaultStyle is applied. To be used with `*Progress.Add(...) +// *Bar` method. +func NewBarFiller(style string) BarFiller { + return newBarFiller(style, false) +} + +// NewBarFillerRev returns a BarFiller implementation which renders a +// progress bar in reverse direction. If style is empty string, +// BarDefaultStyle is applied. To be used with `*Progress.Add(...) +// *Bar` method. +func NewBarFillerRev(style string) BarFiller { + return newBarFiller(style, true) +} + +// NewBarFillerPick pick between regular and reverse BarFiller implementation +// based on rev param. To be used with `*Progress.Add(...) *Bar` method. +func NewBarFillerPick(style string, rev bool) BarFiller { + return newBarFiller(style, rev) +} + +func newBarFiller(style string, rev bool) BarFiller { bf := &barFiller{ - format: make([][]byte, len(DefaultBarStyle)), - rwidth: make([]int, len(DefaultBarStyle)), - reverse: reverse, + format: make([][]byte, len(BarDefaultStyle)), + rwidth: make([]int, len(BarDefaultStyle)), + reverse: rev, + } + bf.parse(BarDefaultStyle) + if style != "" && style != BarDefaultStyle { + bf.parse(style) } - bf.SetStyle(style) return bf } -func (s *barFiller) SetStyle(style string) { +func (s *barFiller) parse(style string) { if !utf8.ValidString(style) { panic("invalid bar style") } - if style == "" { - style = DefaultBarStyle - } - src := make([][]byte, utf8.RuneCountInString(style)) + srcFormat := make([][]byte, len(BarDefaultStyle)) + srcRwidth := make([]int, len(BarDefaultStyle)) i := 0 - for _, r := range style { - s.rwidth[i] = runewidth.RuneWidth(r) - src[i] = []byte(string(r)) - i++ + for gr := uniseg.NewGraphemes(style); i < len(BarDefaultStyle) && gr.Next(); i++ { + srcFormat[i] = gr.Bytes() + srcRwidth[i] = runewidth.StringWidth(gr.Str()) } - copy(s.format, src) - s.SetReverse(s.reverse) -} - -func (s *barFiller) SetReverse(reverse bool) { - if reverse { + copy(s.format, srcFormat[:i]) + copy(s.rwidth, srcRwidth[:i]) + if s.reverse { s.tip = s.format[rRevTip] s.flush = reverseFlush } else { s.tip = s.format[rTip] s.flush = regularFlush } - s.reverse = reverse } func (s *barFiller) Fill(w io.Writer, reqWidth int, stat decor.Statistics) { - width := internal.WidthForBarFiller(reqWidth, stat.AvailableWidth) - - if brackets := s.rwidth[rLeft] + s.rwidth[rRight]; width < brackets { + width := internal.CheckRequestedWidth(reqWidth, stat.AvailableWidth) + brackets := s.rwidth[rLeft] + s.rwidth[rRight] + if width < brackets { return - } else { - // don't count brackets as progress - width -= brackets } + // don't count brackets as progress + width -= brackets + w.Write(s.format[rLeft]) defer w.Write(s.format[rRight]) diff --git a/vendor/github.com/vbauerster/mpb/v5/bar_filler_spinner.go b/vendor/github.com/vbauerster/mpb/v6/bar_filler_spinner.go similarity index 64% rename from vendor/github.com/vbauerster/mpb/v5/bar_filler_spinner.go rename to vendor/github.com/vbauerster/mpb/v6/bar_filler_spinner.go index d2cb2b7260b..0817b19ec46 100644 --- a/vendor/github.com/vbauerster/mpb/v5/bar_filler_spinner.go +++ b/vendor/github.com/vbauerster/mpb/v6/bar_filler_spinner.go @@ -3,10 +3,10 @@ package mpb import ( "io" "strings" - "unicode/utf8" - "github.com/vbauerster/mpb/v5/decor" - "github.com/vbauerster/mpb/v5/internal" + "github.com/mattn/go-runewidth" + "github.com/vbauerster/mpb/v6/decor" + 
"github.com/vbauerster/mpb/v6/internal" ) // SpinnerAlignment enum. @@ -19,8 +19,8 @@ const ( SpinnerOnRight ) -// DefaultSpinnerStyle is a slice of strings, which makes a spinner. -var DefaultSpinnerStyle = []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"} +// SpinnerDefaultStyle is a style for rendering a spinner. +var SpinnerDefaultStyle = []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"} type spinnerFiller struct { frames []string @@ -28,10 +28,12 @@ type spinnerFiller struct { alignment SpinnerAlignment } -// NewSpinnerFiller constucts mpb.BarFiller, to be used with *Progress.Add(...) *Bar method. +// NewSpinnerFiller returns a BarFiller implementation which renders +// a spinner. If style is nil or zero length, SpinnerDefaultStyle is +// applied. To be used with `*Progress.Add(...) *Bar` method. func NewSpinnerFiller(style []string, alignment SpinnerAlignment) BarFiller { if len(style) == 0 { - style = DefaultSpinnerStyle + style = SpinnerDefaultStyle } filler := &spinnerFiller{ frames: style, @@ -41,10 +43,10 @@ func NewSpinnerFiller(style []string, alignment SpinnerAlignment) BarFiller { } func (s *spinnerFiller) Fill(w io.Writer, reqWidth int, stat decor.Statistics) { - width := internal.WidthForBarFiller(reqWidth, stat.AvailableWidth) + width := internal.CheckRequestedWidth(reqWidth, stat.AvailableWidth) frame := s.frames[s.count%uint(len(s.frames))] - frameWidth := utf8.RuneCountInString(frame) + frameWidth := runewidth.StringWidth(frame) if width < frameWidth { return diff --git a/vendor/github.com/vbauerster/mpb/v5/bar_option.go b/vendor/github.com/vbauerster/mpb/v6/bar_option.go similarity index 58% rename from vendor/github.com/vbauerster/mpb/v5/bar_option.go rename to vendor/github.com/vbauerster/mpb/v6/bar_option.go index e7d2e41f9d9..e359c110c6e 100644 --- a/vendor/github.com/vbauerster/mpb/v5/bar_option.go +++ b/vendor/github.com/vbauerster/mpb/v6/bar_option.go @@ -4,10 +4,11 @@ import ( "bytes" "io" - "github.com/vbauerster/mpb/v5/decor" + "github.com/vbauerster/mpb/v6/decor" + "github.com/vbauerster/mpb/v6/internal" ) -// BarOption is a function option which changes the default behavior of a bar. +// BarOption is a func option to alter default behavior of a bar. type BarOption func(*bState) func (s *bState) addDecorators(dest *[]decor.Decorator, decorators ...decor.Decorator) { @@ -88,7 +89,7 @@ func BarFillerOnComplete(message string) BarOption { }) } -// BarFillerMiddleware provides a way to augment default BarFiller. +// BarFillerMiddleware provides a way to augment the underlying BarFiller. func BarFillerMiddleware(middle func(BarFiller) BarFiller) BarOption { return func(s *bState) { s.middleware = middle @@ -104,18 +105,17 @@ func BarPriority(priority int) BarOption { } } -// BarExtender is an option to extend bar to the next new line, with -// arbitrary output. +// BarExtender provides a way to extend bar to the next new line. func BarExtender(filler BarFiller) BarOption { if filler == nil { return nil } return func(s *bState) { - s.extender = makeExtFunc(filler) + s.extender = makeExtenderFunc(filler) } } -func makeExtFunc(filler BarFiller) extFunc { +func makeExtenderFunc(filler BarFiller) extenderFunc { buf := new(bytes.Buffer) return func(r io.Reader, reqWidth int, st decor.Statistics) (io.Reader, int) { filler.Fill(buf, reqWidth, st) @@ -123,37 +123,13 @@ func makeExtFunc(filler BarFiller) extFunc { } } -// BarFillerTrim bar filler is rendered with leading and trailing space -// like ' [===] ' by default. 
With this option leading and trailing -// space will be removed. +// BarFillerTrim removes leading and trailing space around the underlying BarFiller. func BarFillerTrim() BarOption { return func(s *bState) { s.trimSpace = true } } -// TrimSpace is an alias to BarFillerTrim. -func TrimSpace() BarOption { - return BarFillerTrim() -} - -// BarStyle overrides mpb.DefaultBarStyle which is "[=>-]<+". -// It's ok to pass string containing just 5 runes, for example "╢▌▌░╟", -// if you don't need to override '<' (reverse tip) and '+' (refill rune). -func BarStyle(style string) BarOption { - if style == "" { - return nil - } - type styleSetter interface { - SetStyle(string) - } - return func(s *bState) { - if t, ok := s.filler.(styleSetter); ok { - t.SetStyle(style) - } - } -} - // BarNoPop disables bar pop out of container. Effective when // PopCompletedMode of container is enabled. func BarNoPop() BarOption { @@ -162,51 +138,15 @@ func BarNoPop() BarOption { } } -// BarReverse reverse mode, bar will progress from right to left. -func BarReverse() BarOption { - type revSetter interface { - SetReverse(bool) - } - return func(s *bState) { - if t, ok := s.filler.(revSetter); ok { - t.SetReverse(true) - } - } -} - -// SpinnerStyle sets custom spinner style. -// Effective when Filler type is spinner. -func SpinnerStyle(frames []string) BarOption { - if len(frames) == 0 { - return nil - } - chk := func(filler BarFiller) (interface{}, bool) { - t, ok := filler.(*spinnerFiller) - return t, ok - } - cb := func(t interface{}) { - t.(*spinnerFiller).frames = frames - } - return MakeFillerTypeSpecificBarOption(chk, cb) -} - -// MakeFillerTypeSpecificBarOption makes BarOption specific to Filler's -// actual type. If you implement your own Filler, so most probably -// you'll need this. See BarStyle or SpinnerStyle for example. -func MakeFillerTypeSpecificBarOption( - typeChecker func(BarFiller) (interface{}, bool), - cb func(interface{}), -) BarOption { - return func(s *bState) { - if t, ok := typeChecker(s.filler); ok { - cb(t) - } - } +// BarOptional will invoke provided option only when pick is true. +func BarOptional(option BarOption, pick bool) BarOption { + return BarOptOn(option, internal.Predicate(pick)) } -// BarOptOn returns option when condition evaluates to true. -func BarOptOn(option BarOption, condition func() bool) BarOption { - if condition() { +// BarOptOn will invoke provided option only when higher order predicate +// evaluates to true. +func BarOptOn(option BarOption, predicate func() bool) BarOption { + if predicate() { return option } return nil diff --git a/vendor/github.com/vbauerster/mpb/v5/container_option.go b/vendor/github.com/vbauerster/mpb/v6/container_option.go similarity index 70% rename from vendor/github.com/vbauerster/mpb/v5/container_option.go rename to vendor/github.com/vbauerster/mpb/v6/container_option.go index fac59e43653..b92c7578c4e 100644 --- a/vendor/github.com/vbauerster/mpb/v5/container_option.go +++ b/vendor/github.com/vbauerster/mpb/v6/container_option.go @@ -5,10 +5,13 @@ import ( "io/ioutil" "sync" "time" + + "github.com/vbauerster/mpb/v6/internal" ) -// ContainerOption is a function option which changes the default -// behavior of progress container, if passed to mpb.New(...ContainerOption). +// ContainerOption is a func option to alter default behavior of a bar +// container. Container term refers to a Progress struct which can +// hold one or more Bars. type ContainerOption func(*pState) // WithWaitGroup provides means to have a single joint point. 
If @@ -21,8 +24,9 @@ func WithWaitGroup(wg *sync.WaitGroup) ContainerOption { } } -// WithWidth sets container width. If not set underlying bars will -// occupy whole term width. +// WithWidth sets container width. If not set it defaults to terminal +// width. A bar added to the container will inherit its width, unless +// overridden by `func BarWidth(int) BarOption`. func WithWidth(width int) ContainerOption { return func(s *pState) { s.reqWidth = width @@ -38,9 +42,9 @@ func WithRefreshRate(d time.Duration) ContainerOption { // WithManualRefresh disables internal auto refresh time.Ticker. // Refresh will occur upon receive value from provided ch. -func WithManualRefresh(ch <-chan time.Time) ContainerOption { +func WithManualRefresh(ch <-chan interface{}) ContainerOption { return func(s *pState) { - s.refreshSrc = ch + s.externalRefresh = ch } } @@ -68,8 +72,8 @@ func WithShutdownNotifier(ch chan struct{}) ContainerOption { func WithOutput(w io.Writer) ContainerOption { return func(s *pState) { if w == nil { - s.refreshSrc = make(chan time.Time) s.output = ioutil.Discard + s.outputDiscarded = true return } s.output = w @@ -93,9 +97,15 @@ func PopCompletedMode() ContainerOption { } } -// ContainerOptOn returns option when condition evaluates to true. -func ContainerOptOn(option ContainerOption, condition func() bool) ContainerOption { - if condition() { +// ContainerOptional will invoke provided option only when pick is true. +func ContainerOptional(option ContainerOption, pick bool) ContainerOption { + return ContainerOptOn(option, internal.Predicate(pick)) +} + +// ContainerOptOn will invoke provided option only when higher order +// predicate evaluates to true. +func ContainerOptOn(option ContainerOption, predicate func() bool) ContainerOption { + if predicate() { return option } return nil diff --git a/vendor/github.com/vbauerster/mpb/v6/cwriter/doc.go b/vendor/github.com/vbauerster/mpb/v6/cwriter/doc.go new file mode 100644 index 00000000000..93c8f8268d6 --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v6/cwriter/doc.go @@ -0,0 +1,2 @@ +// Package cwriter is a console writer abstraction for the underlying OS. +package cwriter diff --git a/vendor/github.com/vbauerster/mpb/v5/cwriter/util_bsd.go b/vendor/github.com/vbauerster/mpb/v6/cwriter/util_bsd.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v5/cwriter/util_bsd.go rename to vendor/github.com/vbauerster/mpb/v6/cwriter/util_bsd.go diff --git a/vendor/github.com/vbauerster/mpb/v5/cwriter/util_linux.go b/vendor/github.com/vbauerster/mpb/v6/cwriter/util_linux.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v5/cwriter/util_linux.go rename to vendor/github.com/vbauerster/mpb/v6/cwriter/util_linux.go diff --git a/vendor/github.com/vbauerster/mpb/v5/cwriter/util_solaris.go b/vendor/github.com/vbauerster/mpb/v6/cwriter/util_solaris.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v5/cwriter/util_solaris.go rename to vendor/github.com/vbauerster/mpb/v6/cwriter/util_solaris.go diff --git a/vendor/github.com/vbauerster/mpb/v5/cwriter/writer.go b/vendor/github.com/vbauerster/mpb/v6/cwriter/writer.go similarity index 90% rename from vendor/github.com/vbauerster/mpb/v5/cwriter/writer.go rename to vendor/github.com/vbauerster/mpb/v6/cwriter/writer.go index 6f57875c6c1..1ade54761dd 100644 --- a/vendor/github.com/vbauerster/mpb/v5/cwriter/writer.go +++ b/vendor/github.com/vbauerster/mpb/v6/cwriter/writer.go @@ -8,8 +8,8 @@ import ( "strconv" ) -// NotATTY not a TeleTYpewriter error. 
-var NotATTY = errors.New("not a terminal") +// ErrNotTTY not a TeleTYpewriter error. +var ErrNotTTY = errors.New("not a terminal") // http://ascii-table.com/ansi-escape-sequences.php const ( @@ -39,7 +39,7 @@ func New(out io.Writer) *Writer { // Flush flushes the underlying buffer. func (w *Writer) Flush(lineCount int) (err error) { - // some terminals interpret clear 0 lines as clear 1 + // some terminals interpret 'cursor up 0' as 'cursor up 1' if w.lineCount > 0 { err = w.clearLines() if err != nil { @@ -70,7 +70,7 @@ func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) { // GetWidth returns width of underlying terminal. func (w *Writer) GetWidth() (int, error) { if !w.isTerminal { - return -1, NotATTY + return -1, ErrNotTTY } tw, _, err := GetSize(w.fd) return tw, err diff --git a/vendor/github.com/vbauerster/mpb/v5/cwriter/writer_posix.go b/vendor/github.com/vbauerster/mpb/v6/cwriter/writer_posix.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v5/cwriter/writer_posix.go rename to vendor/github.com/vbauerster/mpb/v6/cwriter/writer_posix.go diff --git a/vendor/github.com/vbauerster/mpb/v5/cwriter/writer_windows.go b/vendor/github.com/vbauerster/mpb/v6/cwriter/writer_windows.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v5/cwriter/writer_windows.go rename to vendor/github.com/vbauerster/mpb/v6/cwriter/writer_windows.go diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/any.go b/vendor/github.com/vbauerster/mpb/v6/decor/any.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v5/decor/any.go rename to vendor/github.com/vbauerster/mpb/v6/decor/any.go diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/counters.go b/vendor/github.com/vbauerster/mpb/v6/decor/counters.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v5/decor/counters.go rename to vendor/github.com/vbauerster/mpb/v6/decor/counters.go diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/decorator.go b/vendor/github.com/vbauerster/mpb/v6/decor/decorator.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v5/decor/decorator.go rename to vendor/github.com/vbauerster/mpb/v6/decor/decorator.go diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/doc.go b/vendor/github.com/vbauerster/mpb/v6/decor/doc.go similarity index 86% rename from vendor/github.com/vbauerster/mpb/v5/decor/doc.go rename to vendor/github.com/vbauerster/mpb/v6/decor/doc.go index 6d26144510d..bfbb82e1168 100644 --- a/vendor/github.com/vbauerster/mpb/v5/decor/doc.go +++ b/vendor/github.com/vbauerster/mpb/v6/decor/doc.go @@ -1,6 +1,5 @@ +// Package decor provides common decorators for "github.com/vbauerster/mpb/v6" module. /* - Package decor provides common decorators for "github.com/vbauerster/mpb/v5" module. - Some decorators returned by this package might have a closure state. It is ok to use decorators concurrently, unless you share the same decorator among multiple *mpb.Bar instances. To avoid data races, create new decorator per *mpb.Bar instance. 
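The mpb hunks above change how fillers are constructed in v6: NewBarFiller loses its reverse flag (reverse and pick variants get their own constructors), DefaultBarStyle/DefaultSpinnerStyle become BarDefaultStyle/SpinnerDefaultStyle, the BarStyle/SpinnerStyle/BarReverse options disappear, and BarOptional/ContainerOptional replace hand-rolled predicate closures. A minimal sketch of v6-style construction follows, assuming the usual decorator helpers (decor.Name, decor.Percentage) that are not part of this diff:

package main

import (
	"time"

	"github.com/vbauerster/mpb/v6"
	"github.com/vbauerster/mpb/v6/decor"
)

func main() {
	p := mpb.New(mpb.WithWidth(64))

	// v6: direction is chosen by constructor, not a bool argument;
	// an empty style falls back to BarDefaultStyle ("[=>-]<+").
	filler := mpb.NewBarFiller(mpb.BarDefaultStyle) // or mpb.NewBarFillerRev / mpb.NewBarFillerPick

	bar := p.Add(100, filler,
		mpb.PrependDecorators(decor.Name("copying ")),
		mpb.AppendDecorators(decor.Percentage()),
		// BarOptional wraps BarOptOn with a plain boolean pick.
		mpb.BarOptional(mpb.BarFillerTrim(), true),
	)

	for i := 0; i < 100; i++ {
		bar.Increment()
		time.Sleep(10 * time.Millisecond)
	}
	p.Wait()
}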
diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/elapsed.go b/vendor/github.com/vbauerster/mpb/v6/decor/elapsed.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v5/decor/elapsed.go rename to vendor/github.com/vbauerster/mpb/v6/decor/elapsed.go diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/eta.go b/vendor/github.com/vbauerster/mpb/v6/decor/eta.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v5/decor/eta.go rename to vendor/github.com/vbauerster/mpb/v6/decor/eta.go diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/merge.go b/vendor/github.com/vbauerster/mpb/v6/decor/merge.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v5/decor/merge.go rename to vendor/github.com/vbauerster/mpb/v6/decor/merge.go diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/moving_average.go b/vendor/github.com/vbauerster/mpb/v6/decor/moving_average.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v5/decor/moving_average.go rename to vendor/github.com/vbauerster/mpb/v6/decor/moving_average.go diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/name.go b/vendor/github.com/vbauerster/mpb/v6/decor/name.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v5/decor/name.go rename to vendor/github.com/vbauerster/mpb/v6/decor/name.go diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/on_complete.go b/vendor/github.com/vbauerster/mpb/v6/decor/on_complete.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v5/decor/on_complete.go rename to vendor/github.com/vbauerster/mpb/v6/decor/on_complete.go diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/percentage.go b/vendor/github.com/vbauerster/mpb/v6/decor/percentage.go similarity index 96% rename from vendor/github.com/vbauerster/mpb/v5/decor/percentage.go rename to vendor/github.com/vbauerster/mpb/v6/decor/percentage.go index d6314a61989..f4922bb52ce 100644 --- a/vendor/github.com/vbauerster/mpb/v5/decor/percentage.go +++ b/vendor/github.com/vbauerster/mpb/v6/decor/percentage.go @@ -5,7 +5,7 @@ import ( "io" "strconv" - "github.com/vbauerster/mpb/v5/internal" + "github.com/vbauerster/mpb/v6/internal" ) type percentageType float64 diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/size_type.go b/vendor/github.com/vbauerster/mpb/v6/decor/size_type.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v5/decor/size_type.go rename to vendor/github.com/vbauerster/mpb/v6/decor/size_type.go diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/sizeb1000_string.go b/vendor/github.com/vbauerster/mpb/v6/decor/sizeb1000_string.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v5/decor/sizeb1000_string.go rename to vendor/github.com/vbauerster/mpb/v6/decor/sizeb1000_string.go diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/sizeb1024_string.go b/vendor/github.com/vbauerster/mpb/v6/decor/sizeb1024_string.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v5/decor/sizeb1024_string.go rename to vendor/github.com/vbauerster/mpb/v6/decor/sizeb1024_string.go diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/speed.go b/vendor/github.com/vbauerster/mpb/v6/decor/speed.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v5/decor/speed.go rename to vendor/github.com/vbauerster/mpb/v6/decor/speed.go diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/spinner.go b/vendor/github.com/vbauerster/mpb/v6/decor/spinner.go similarity index 100% rename from 
vendor/github.com/vbauerster/mpb/v5/decor/spinner.go rename to vendor/github.com/vbauerster/mpb/v6/decor/spinner.go diff --git a/vendor/github.com/vbauerster/mpb/v5/doc.go b/vendor/github.com/vbauerster/mpb/v6/doc.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v5/doc.go rename to vendor/github.com/vbauerster/mpb/v6/doc.go diff --git a/vendor/github.com/vbauerster/mpb/v6/go.mod b/vendor/github.com/vbauerster/mpb/v6/go.mod new file mode 100644 index 00000000000..55d523ed69d --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v6/go.mod @@ -0,0 +1,11 @@ +module github.com/vbauerster/mpb/v6 + +require ( + github.com/VividCortex/ewma v1.1.1 + github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d + github.com/mattn/go-runewidth v0.0.10 + github.com/rivo/uniseg v0.2.0 + golang.org/x/sys v0.0.0-20210324051608-47abb6519492 +) + +go 1.14 diff --git a/vendor/github.com/vbauerster/mpb/v6/go.sum b/vendor/github.com/vbauerster/mpb/v6/go.sum new file mode 100644 index 00000000000..4809b4a1916 --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v6/go.sum @@ -0,0 +1,11 @@ +github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM= +github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= +github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg= +github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492 h1:Paq34FxTluEPvVyayQqMPgHm+vTOrIifmcYxFBx9TLg= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/vbauerster/mpb/v5/internal/percentage.go b/vendor/github.com/vbauerster/mpb/v6/internal/percentage.go similarity index 87% rename from vendor/github.com/vbauerster/mpb/v5/internal/percentage.go rename to vendor/github.com/vbauerster/mpb/v6/internal/percentage.go index e321e0a6b56..a8ef8be1250 100644 --- a/vendor/github.com/vbauerster/mpb/v5/internal/percentage.go +++ b/vendor/github.com/vbauerster/mpb/v6/internal/percentage.go @@ -13,6 +13,7 @@ func Percentage(total, current int64, width int) float64 { return float64(int64(width)*current) / float64(total) } +// PercentageRound same as Percentage but with math.Round. func PercentageRound(total, current int64, width int) float64 { return math.Round(Percentage(total, current, width)) } diff --git a/vendor/github.com/vbauerster/mpb/v6/internal/predicate.go b/vendor/github.com/vbauerster/mpb/v6/internal/predicate.go new file mode 100644 index 00000000000..1e4dd24d9da --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v6/internal/predicate.go @@ -0,0 +1,6 @@ +package internal + +// Predicate helper for internal use. 
+func Predicate(pick bool) func() bool { + return func() bool { return pick } +} diff --git a/vendor/github.com/vbauerster/mpb/v6/internal/width.go b/vendor/github.com/vbauerster/mpb/v6/internal/width.go new file mode 100644 index 00000000000..216320f249a --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v6/internal/width.go @@ -0,0 +1,10 @@ +package internal + +// CheckRequestedWidth checks that requested width doesn't overflow +// available width +func CheckRequestedWidth(requested, available int) int { + if requested <= 0 || requested >= available { + return available + } + return requested +} diff --git a/vendor/github.com/vbauerster/mpb/v5/priority_queue.go b/vendor/github.com/vbauerster/mpb/v6/priority_queue.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v5/priority_queue.go rename to vendor/github.com/vbauerster/mpb/v6/priority_queue.go diff --git a/vendor/github.com/vbauerster/mpb/v5/progress.go b/vendor/github.com/vbauerster/mpb/v6/progress.go similarity index 85% rename from vendor/github.com/vbauerster/mpb/v5/progress.go rename to vendor/github.com/vbauerster/mpb/v6/progress.go index fb66ce05d6c..5a3f9624306 100644 --- a/vendor/github.com/vbauerster/mpb/v5/progress.go +++ b/vendor/github.com/vbauerster/mpb/v6/progress.go @@ -13,8 +13,8 @@ import ( "sync" "time" - "github.com/vbauerster/mpb/v5/cwriter" - "github.com/vbauerster/mpb/v5/decor" + "github.com/vbauerster/mpb/v6/cwriter" + "github.com/vbauerster/mpb/v6/decor" ) const ( @@ -22,7 +22,8 @@ const ( prr = 120 * time.Millisecond ) -// Progress represents the container that renders Progress bars +// Progress represents a container that renders one or more progress +// bars. type Progress struct { ctx context.Context uwg *sync.WaitGroup @@ -35,6 +36,8 @@ type Progress struct { dlogger *log.Logger } +// pState holds bars in its priorityQueue. It gets passed to +// *Progress.serve(...) monitor goroutine. type pState struct { bHeap priorityQueue heapUpdated bool @@ -46,9 +49,10 @@ type pState struct { idCount int reqWidth int popCompleted bool + outputDiscarded bool rr time.Duration uwg *sync.WaitGroup - refreshSrc <-chan time.Time + externalRefresh <-chan interface{} renderDelay <-chan struct{} shutdownNotifier chan struct{} parkedBars map[*Bar]*Bar @@ -95,18 +99,21 @@ func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress { return p } -// AddBar creates a new progress bar and adds it to the rendering queue. +// AddBar creates a bar with default bar filler. Different filler can +// be choosen and applied via `*Progress.Add(...) *Bar` method. func (p *Progress) AddBar(total int64, options ...BarOption) *Bar { - return p.Add(total, NewBarFiller(DefaultBarStyle, false), options...) + return p.Add(total, NewBarFiller(BarDefaultStyle), options...) } -// AddSpinner creates a new spinner bar and adds it to the rendering queue. +// AddSpinner creates a bar with default spinner filler. Different +// filler can be choosen and applied via `*Progress.Add(...) *Bar` +// method. func (p *Progress) AddSpinner(total int64, alignment SpinnerAlignment, options ...BarOption) *Bar { - return p.Add(total, NewSpinnerFiller(DefaultSpinnerStyle, alignment), options...) + return p.Add(total, NewSpinnerFiller(SpinnerDefaultStyle, alignment), options...) } // Add creates a bar which renders itself by provided filler. -// Set total to 0, if you plan to update it later. +// If `total <= 0` trigger complete event is disabled until reset with *bar.SetTotal(int64, bool). 
// Panics if *Progress instance is done, i.e. called after *Progress.Wait(). func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) *Bar { if filler == nil { @@ -168,7 +175,7 @@ func (p *Progress) UpdateBarPriority(b *Bar, priority int) { p.setBarPriority(b, priority) } -// BarCount returns bars count +// BarCount returns bars count. func (p *Progress) BarCount() int { result := make(chan int, 1) select { @@ -234,15 +241,26 @@ func (s *pState) newTicker(done <-chan struct{}) chan time.Time { if s.renderDelay != nil { <-s.renderDelay } - if s.refreshSrc == nil { - ticker := time.NewTicker(s.rr) - defer ticker.Stop() - s.refreshSrc = ticker.C + var internalRefresh <-chan time.Time + if !s.outputDiscarded { + if s.externalRefresh == nil { + ticker := time.NewTicker(s.rr) + defer ticker.Stop() + internalRefresh = ticker.C + } + } else { + s.externalRefresh = nil } for { select { - case tick := <-s.refreshSrc: - ch <- tick + case t := <-internalRefresh: + ch <- t + case x := <-s.externalRefresh: + if t, ok := x.(time.Time); ok { + ch <- t + } else { + ch <- time.Now() + } case <-done: close(s.shutdownNotifier) return @@ -349,6 +367,10 @@ func (s *pState) makeBarState(total int64, filler BarFiller, options ...BarOptio debugOut: s.debugOut, } + if total > 0 { + bs.triggerComplete = true + } + for _, opt := range options { if opt != nil { opt(bs) diff --git a/vendor/github.com/vbauerster/mpb/v5/proxyreader.go b/vendor/github.com/vbauerster/mpb/v6/proxyreader.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v5/proxyreader.go rename to vendor/github.com/vbauerster/mpb/v6/proxyreader.go diff --git a/vendor/github.com/vmware/govmomi/.goreleaser.yml b/vendor/github.com/vmware/govmomi/.goreleaser.yml index 6a374cd9b4e..9e7edeb3c71 100644 --- a/vendor/github.com/vmware/govmomi/.goreleaser.yml +++ b/vendor/github.com/vmware/govmomi/.goreleaser.yml @@ -11,6 +11,7 @@ builds: - amd64 - 386 - arm64 + - mips64le env: - CGO_ENABLED=0 main: ./govc/main.go @@ -27,6 +28,7 @@ builds: - amd64 - 386 - arm64 + - mips64le env: - CGO_ENABLED=0 main: ./vcsim/main.go diff --git a/vendor/github.com/vmware/govmomi/README.md b/vendor/github.com/vmware/govmomi/README.md index db17cc66471..2bc127e2b20 100644 --- a/vendor/github.com/vmware/govmomi/README.md +++ b/vendor/github.com/vmware/govmomi/README.md @@ -89,6 +89,8 @@ Refer to the [CHANGELOG](CHANGELOG.md) for version to version changes. 
* [OPS](https://github.com/nanovms/ops) +* [VMware Event Broker Appliance](https://github.com/vmware-samples/vcenter-event-broker-appliance/tree/development/vmware-event-router) + ## Related projects * [rbvmomi](https://github.com/vmware/rbvmomi) diff --git a/vendor/github.com/vmware/govmomi/object/virtual_device_list.go b/vendor/github.com/vmware/govmomi/object/virtual_device_list.go index a119a4fcdc1..f25f6f2c113 100644 --- a/vendor/github.com/vmware/govmomi/object/virtual_device_list.go +++ b/vendor/github.com/vmware/govmomi/object/virtual_device_list.go @@ -135,6 +135,9 @@ func (l VirtualDeviceList) SelectByBackingInfo(backing types.BaseVirtualDeviceBa b := backing.(*types.VirtualEthernetCardDistributedVirtualPortBackingInfo) return a.Port.SwitchUuid == b.Port.SwitchUuid && a.Port.PortgroupKey == b.Port.PortgroupKey + case *types.VirtualEthernetCardOpaqueNetworkBackingInfo: + b := backing.(*types.VirtualEthernetCardOpaqueNetworkBackingInfo) + return a.OpaqueNetworkId == b.OpaqueNetworkId case *types.VirtualDiskFlatVer2BackingInfo: b := backing.(*types.VirtualDiskFlatVer2BackingInfo) if a.Parent != nil && b.Parent != nil { diff --git a/vendor/github.com/vmware/govmomi/object/virtual_machine.go b/vendor/github.com/vmware/govmomi/object/virtual_machine.go index 2f0022803e4..1d85047fc18 100644 --- a/vendor/github.com/vmware/govmomi/object/virtual_machine.go +++ b/vendor/github.com/vmware/govmomi/object/virtual_machine.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2015-2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2015-2021 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -40,6 +40,34 @@ type VirtualMachine struct { Common } +// extractDiskLayoutFiles is a helper function used to extract file keys for +// all disk files attached to the virtual machine at the current point of +// running. +func extractDiskLayoutFiles(diskLayoutList []types.VirtualMachineFileLayoutExDiskLayout) []int { + var result []int + + for _, layoutExDisk := range diskLayoutList { + for _, link := range layoutExDisk.Chain { + for i := range link.FileKey { // diskDescriptor, diskExtent pairs + result = append(result, int(link.FileKey[i])) + } + } + } + + return result +} + +// removeKey is a helper function for removing a specific file key from a list +// of keys associated with disks attached to a virtual machine. +func removeKey(l *[]int, key int) { + for i, k := range *l { + if k == key { + *l = append((*l)[:i], (*l)[i+1:]...) + break + } + } +} + func NewVirtualMachine(c *vim25.Client, ref types.ManagedObjectReference) *VirtualMachine { return &VirtualMachine{ Common: NewCommon(c, ref), @@ -628,6 +656,63 @@ func (m snapshotMap) add(parent string, tree []types.VirtualMachineSnapshotTree) } } +// SnapshotSize calculates the size of a given snapshot in bytes. If the +// snapshot is current, disk files not associated with any parent snapshot are +// included in size calculations. This allows for measuring and including the +// growth from the last fixed snapshot to the present state. 
+func SnapshotSize(info types.ManagedObjectReference, parent *types.ManagedObjectReference, vmlayout *types.VirtualMachineFileLayoutEx, isCurrent bool) int { + var fileKeyList []int + var parentFiles []int + var allSnapshotFiles []int + + diskFiles := extractDiskLayoutFiles(vmlayout.Disk) + + for _, layout := range vmlayout.Snapshot { + diskLayout := extractDiskLayoutFiles(layout.Disk) + allSnapshotFiles = append(allSnapshotFiles, diskLayout...) + + if layout.Key.Value == info.Value { + fileKeyList = append(fileKeyList, int(layout.DataKey)) // The .vmsn file + fileKeyList = append(fileKeyList, diskLayout...) // The .vmdk files + } else if parent != nil && layout.Key.Value == parent.Value { + parentFiles = append(parentFiles, diskLayout...) + } + } + + for _, parentFile := range parentFiles { + removeKey(&fileKeyList, parentFile) + } + + for _, file := range allSnapshotFiles { + removeKey(&diskFiles, file) + } + + fileKeyMap := make(map[int]types.VirtualMachineFileLayoutExFileInfo) + for _, file := range vmlayout.File { + fileKeyMap[int(file.Key)] = file + } + + size := 0 + + for _, fileKey := range fileKeyList { + file := fileKeyMap[fileKey] + if parent != nil || + (file.Type != string(types.VirtualMachineFileLayoutExFileTypeDiskDescriptor) && + file.Type != string(types.VirtualMachineFileLayoutExFileTypeDiskExtent)) { + size += int(file.Size) + } + } + + if isCurrent { + for _, diskFile := range diskFiles { + file := fileKeyMap[diskFile] + size += int(file.Size) + } + } + + return size +} + // FindSnapshot supports snapshot lookup by name, where name can be: // 1) snapshot ManagedObjectReference.Value (unique) // 2) snapshot name (may not be unique) diff --git a/vendor/github.com/vmware/govmomi/session/manager.go b/vendor/github.com/vmware/govmomi/session/manager.go index 24c6a2dbe0d..8689acd504a 100644 --- a/vendor/github.com/vmware/govmomi/session/manager.go +++ b/vendor/github.com/vmware/govmomi/session/manager.go @@ -281,3 +281,14 @@ func (sm *Manager) CloneSession(ctx context.Context, ticket string) error { sm.userSession = &res.Returnval return nil } + +func (sm *Manager) UpdateServiceMessage(ctx context.Context, message string) error { + req := types.UpdateServiceMessage{ + This: sm.Reference(), + Message: message, + } + + _, err := methods.UpdateServiceMessage(ctx, sm.client, &req) + + return err +} diff --git a/vendor/github.com/vmware/govmomi/vim25/retry.go b/vendor/github.com/vmware/govmomi/vim25/retry.go index f10e2cb6c9d..bf663a10126 100644 --- a/vendor/github.com/vmware/govmomi/vim25/retry.go +++ b/vendor/github.com/vmware/govmomi/vim25/retry.go @@ -25,33 +25,40 @@ import ( type RetryFunc func(err error) (retry bool, delay time.Duration) -// TemporaryNetworkError returns a RetryFunc that retries up to a maximum of n -// times, only if the error returned by the RoundTrip function is a temporary -// network error (for example: a connect timeout). +// TemporaryNetworkError is deprecated. Use Retry() with RetryTemporaryNetworkError and retryAttempts instead. func TemporaryNetworkError(n int) RetryFunc { - return func(err error) (retry bool, delay time.Duration) { - var ok bool - - t, ok := err.(interface { - // Temporary is implemented by url.Error and net.Error - Temporary() bool - }) - if !ok { - // Never retry if this is not a Temporary error. - return false, 0 - } - - if !t.Temporary() { - return false, 0 + return func(err error) (bool, time.Duration) { + if IsTemporaryNetworkError(err) { + // Don't retry if we're out of tries. 
+ if n--; n <= 0 { + return false, 0 + } + return true, 0 } + return false, 0 + } +} - // Don't retry if we're out of tries. - if n--; n <= 0 { - return false, 0 - } +// RetryTemporaryNetworkError returns a RetryFunc that returns IsTemporaryNetworkError(err) +func RetryTemporaryNetworkError(err error) (bool, time.Duration) { + return IsTemporaryNetworkError(err), 0 +} - return true, 0 +// IsTemporaryNetworkError returns false unless the error implements +// a Temporary() bool method such as url.Error and net.Error. +// Otherwise, returns the value of the Temporary() method. +func IsTemporaryNetworkError(err error) bool { + t, ok := err.(interface { + // Temporary is implemented by url.Error and net.Error + Temporary() bool + }) + + if !ok { + // Not a Temporary error. + return false } + + return t.Temporary() } type retry struct { @@ -60,7 +67,8 @@ type retry struct { // fn is a custom function that is called when an error occurs. // It returns whether or not to retry, and if so, how long to // delay before retrying. - fn RetryFunc + fn RetryFunc + maxRetryAttempts int } // Retry wraps the specified soap.RoundTripper and invokes the @@ -68,10 +76,16 @@ type retry struct { // retry the call, and if so, how long to wait before retrying. If // the result of this function is to not retry, the original error // is returned from the RoundTrip function. -func Retry(roundTripper soap.RoundTripper, fn RetryFunc) soap.RoundTripper { +// The soap.RoundTripper will return the original error if retryAttempts is specified and reached. +func Retry(roundTripper soap.RoundTripper, fn RetryFunc, retryAttempts ...int) soap.RoundTripper { r := &retry{ - roundTripper: roundTripper, - fn: fn, + roundTripper: roundTripper, + fn: fn, + maxRetryAttempts: 1, + } + + if len(retryAttempts) == 1 { + r.maxRetryAttempts = retryAttempts[0] } return r @@ -80,7 +94,7 @@ func Retry(roundTripper soap.RoundTripper, fn RetryFunc) soap.RoundTripper { func (r *retry) RoundTrip(ctx context.Context, req, res soap.HasFault) error { var err error - for { + for attempt := 0; attempt < r.maxRetryAttempts; attempt++ { err = r.roundTripper.RoundTrip(ctx, req, res) if err == nil { break diff --git a/vendor/github.com/vmware/govmomi/vim25/soap/client.go b/vendor/github.com/vmware/govmomi/vim25/soap/client.go index 531e797e3bd..637330462c7 100644 --- a/vendor/github.com/vmware/govmomi/vim25/soap/client.go +++ b/vendor/github.com/vmware/govmomi/vim25/soap/client.go @@ -560,7 +560,7 @@ type statusError struct { } // Temporary returns true for HTTP response codes that can be retried -// See vim25.TemporaryNetworkError +// See vim25.IsTemporaryNetworkError func (e *statusError) Temporary() bool { switch e.res.StatusCode { case http.StatusBadGateway: diff --git a/vendor/github.com/vmware/govmomi/vim25/soap/error.go b/vendor/github.com/vmware/govmomi/vim25/soap/error.go index 46111556cbd..1e1508733a5 100644 --- a/vendor/github.com/vmware/govmomi/vim25/soap/error.go +++ b/vendor/github.com/vmware/govmomi/vim25/soap/error.go @@ -17,6 +17,7 @@ limitations under the License. 
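The vim25/retry.go rewrite above splits the old stateful TemporaryNetworkError closure into IsTemporaryNetworkError/RetryTemporaryNetworkError and bounds RoundTrip by maxRetryAttempts, which defaults to 1, so the attempt budget now has to be passed through Retry's new variadic argument. A hedged sketch of wiring this into a vim25 client and calling the new session.Manager.UpdateServiceMessage; the endpoint URL, credentials, and attempt count are placeholders:

package main

import (
	"context"
	"net/url"

	"github.com/vmware/govmomi/session"
	"github.com/vmware/govmomi/vim25"
	"github.com/vmware/govmomi/vim25/soap"
)

func newClient(ctx context.Context, u *url.URL) (*vim25.Client, error) {
	sc := soap.NewClient(u, true) // insecure TLS: placeholder setting
	c, err := vim25.New(ctx, sc)
	if err != nil {
		return nil, err
	}

	// Retry temporary network errors (and retryable HTTP statuses) up to
	// three attempts; without the variadic count the loop now runs only once.
	c.RoundTripper = vim25.Retry(c.RoundTripper, vim25.RetryTemporaryNetworkError, 3)

	m := session.NewManager(c)
	if err := m.Login(ctx, u.User); err != nil {
		return nil, err
	}
	// UpdateServiceMessage (added above) sets the vCenter message of the day.
	if err := m.UpdateServiceMessage(ctx, "maintenance window at 02:00 UTC"); err != nil {
		return nil, err
	}
	return c, nil
}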
package soap import ( + "encoding/json" "fmt" "reflect" @@ -49,6 +50,15 @@ func (s soapFaultError) Error() string { return fmt.Sprintf("%s: %s", s.fault.Code, msg) } +func (s soapFaultError) MarshalJSON() ([]byte, error) { + out := struct { + Fault *Fault + }{ + Fault: s.fault, + } + return json.Marshal(out) +} + type vimFaultError struct { fault types.BaseMethodFault } diff --git a/vendor/github.com/vmware/govmomi/vim25/types/if.go b/vendor/github.com/vmware/govmomi/vim25/types/if.go index 94a65fafaeb..d80255000f9 100644 --- a/vendor/github.com/vmware/govmomi/vim25/types/if.go +++ b/vendor/github.com/vmware/govmomi/vim25/types/if.go @@ -798,7 +798,6 @@ func (b *DvsFilterConfig) GetDvsFilterConfig() *DvsFilterConfig { return b } type BaseDvsFilterConfig interface { GetDvsFilterConfig() *DvsFilterConfig - GetDvsTrafficFilterConfig() *DvsTrafficFilterConfig } func init() { @@ -841,21 +840,12 @@ func (b *DvsNetworkRuleQualifier) GetDvsNetworkRuleQualifier() *DvsNetworkRuleQu type BaseDvsNetworkRuleQualifier interface { GetDvsNetworkRuleQualifier() *DvsNetworkRuleQualifier - GetDvsIpNetworkRuleQualifier() *DvsIpNetworkRuleQualifier } func init() { t["BaseDvsNetworkRuleQualifier"] = reflect.TypeOf((*DvsNetworkRuleQualifier)(nil)).Elem() } -func (b *DvsIpNetworkRuleQualifier) GetDvsIpNetworkRuleQualifier() *DvsIpNetworkRuleQualifier { - return b -} - -type BaseDvsIpNetworkRuleQualifier interface { - GetDvsIpNetworkRuleQualifier() *DvsIpNetworkRuleQualifier -} - func (b *DvsTrafficFilterConfig) GetDvsTrafficFilterConfig() *DvsTrafficFilterConfig { return b } type BaseDvsTrafficFilterConfig interface { diff --git a/vendor/github.com/vmware/govmomi/vim25/types/types.go b/vendor/github.com/vmware/govmomi/vim25/types/types.go index b3533cd0649..ac5de63b446 100644 --- a/vendor/github.com/vmware/govmomi/vim25/types/types.go +++ b/vendor/github.com/vmware/govmomi/vim25/types/types.go @@ -32821,7 +32821,7 @@ type NetworkSummary struct { Name string `xml:"name"` Accessible bool `xml:"accessible"` IpPoolName string `xml:"ipPoolName"` - IpPoolId int32 `xml:"ipPoolId,omitempty"` + IpPoolId *int32 `xml:"ipPoolId"` } func init() { diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go index c474e5a8046..94c71ac1ac8 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.11 && gc && !purego // +build go1.11,gc,!purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go index 3e8a609fbd7..025b49897e3 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build (!arm64 && !s390x && !ppc64le) || (arm64 && !go1.11) || !gc || purego // +build !arm64,!s390x,!ppc64le arm64,!go1.11 !gc purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go index 2806c6325d9..da420b2e97b 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc && !purego // +build gc,!purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go index a0774dde1ce..c5898db4658 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc && !purego // +build gc,!purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go index c41d0614855..25959b9a6ef 100644 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc && !purego // +build gc,!purego package chacha20poly1305 diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go index 13941c476fc..f832b33d45f 100644 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !amd64 || !gc || purego // +build !amd64 !gc purego package chacha20poly1305 diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.go b/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.go index 877b6de2926..84858480dff 100644 --- a/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.go +++ b/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build amd64 && gc && !purego // +build amd64,gc,!purego package curve25519 diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_noasm.go b/vendor/golang.org/x/crypto/curve25519/curve25519_noasm.go index 80d3300af5d..259728af7da 100644 --- a/vendor/golang.org/x/crypto/curve25519/curve25519_noasm.go +++ b/vendor/golang.org/x/crypto/curve25519/curve25519_noasm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build !amd64 || !gc || purego // +build !amd64 !gc purego package curve25519 diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go index c7f8c7e64ec..71ad917dadd 100644 --- a/vendor/golang.org/x/crypto/ed25519/ed25519.go +++ b/vendor/golang.org/x/crypto/ed25519/ed25519.go @@ -5,6 +5,7 @@ // In Go 1.13, the ed25519 package was promoted to the standard library as // crypto/ed25519, and this package became a wrapper for the standard library one. // +//go:build !go1.13 // +build !go1.13 // Package ed25519 implements the Ed25519 signature algorithm. See diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go b/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go index d1448d8d220..b5974dc8b27 100644 --- a/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go +++ b/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.13 // +build go1.13 // Package ed25519 implements the Ed25519 signature algorithm. See diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing.go b/vendor/golang.org/x/crypto/internal/subtle/aliasing.go index 281c27ef02d..4fad24f8dcd 100644 --- a/vendor/golang.org/x/crypto/internal/subtle/aliasing.go +++ b/vendor/golang.org/x/crypto/internal/subtle/aliasing.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego // +build !purego // Package subtle implements functions that are often useful in cryptographic diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go b/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go index e20a2965927..80ccbed2c0d 100644 --- a/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go +++ b/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego // +build purego // Package subtle implements functions that are often useful in cryptographic diff --git a/vendor/golang.org/x/crypto/poly1305/bits_compat.go b/vendor/golang.org/x/crypto/poly1305/bits_compat.go index 157a69f61bd..45b5c966b2b 100644 --- a/vendor/golang.org/x/crypto/poly1305/bits_compat.go +++ b/vendor/golang.org/x/crypto/poly1305/bits_compat.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.13 // +build !go1.13 package poly1305 diff --git a/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go b/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go index a0a185f0fc7..ed52b3418ab 100644 --- a/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go +++ b/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.13 // +build go1.13 package poly1305 diff --git a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go index af6c94f921b..f184b67d98d 100644 --- a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go +++ b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build (!amd64 && !ppc64le && !s390x) || !gc || purego // +build !amd64,!ppc64le,!s390x !gc purego package poly1305 diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go index cf3a69ed3b9..6d522333f29 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc && !purego // +build gc,!purego package poly1305 diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go index cb4b7185dcb..4a069941a6e 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go +++ b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc && !purego // +build gc,!purego package poly1305 diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go index 188a665e127..62cc9f84709 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go +++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc && !purego // +build gc,!purego package poly1305 diff --git a/vendor/golang.org/x/crypto/sha3/hashes_generic.go b/vendor/golang.org/x/crypto/sha3/hashes_generic.go index fac0c0e2c9e..c74fc20fcb3 100644 --- a/vendor/golang.org/x/crypto/sha3/hashes_generic.go +++ b/vendor/golang.org/x/crypto/sha3/hashes_generic.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !gc || purego || !s390x // +build !gc purego !s390x package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/keccakf.go b/vendor/golang.org/x/crypto/sha3/keccakf.go index 92b63a3cf0b..0f4ae8bacff 100644 --- a/vendor/golang.org/x/crypto/sha3/keccakf.go +++ b/vendor/golang.org/x/crypto/sha3/keccakf.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64 purego !gc +//go:build !amd64 || purego || !gc +// +build !amd64 purego !gc package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go index 3e3e7600f9a..248a38241ff 100644 --- a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build amd64 && !purego && gc // +build amd64,!purego,gc package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/register.go b/vendor/golang.org/x/crypto/sha3/register.go index 3cf6a22e093..8b4453aac3c 100644 --- a/vendor/golang.org/x/crypto/sha3/register.go +++ b/vendor/golang.org/x/crypto/sha3/register.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
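The golang.org/x/crypto hunks here and below all follow one pattern: each assembly-gated file gains a Go 1.17 //go:build expression while keeping the legacy // +build line for older toolchains, and the two lines must express the same constraint. A small illustration for a hypothetical file gated on amd64 assembly (file and package names are made up):

// The boolean //go:build syntax and the legacy comma/space // +build form
// must stay in agreement; on Go 1.17+ gofmt keeps the two in sync.

//go:build amd64 && gc && !purego
// +build amd64,gc,!purego

package fastpath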
+//go:build go1.4 // +build go1.4 package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go index 485e2d5fa83..4fcfc924ef6 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc && !purego // +build gc,!purego package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/shake_generic.go b/vendor/golang.org/x/crypto/sha3/shake_generic.go index 68148f2643a..5c0710ef98f 100644 --- a/vendor/golang.org/x/crypto/sha3/shake_generic.go +++ b/vendor/golang.org/x/crypto/sha3/shake_generic.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !gc || purego || !s390x // +build !gc purego !s390x package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go index ddafa826c96..59c8eb94186 100644 --- a/vendor/golang.org/x/crypto/sha3/xor.go +++ b/vendor/golang.org/x/crypto/sha3/xor.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (!amd64 && !386 && !ppc64le) || purego // +build !amd64,!386,!ppc64le purego package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go index 6249ad85caa..1ce606246d5 100644 --- a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go +++ b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (amd64 || 386 || ppc64le) && !purego // +build amd64 386 ppc64le // +build !purego diff --git a/vendor/golang.org/x/mod/modfile/rule.go b/vendor/golang.org/x/mod/modfile/rule.go index c6a189dbe04..f8c93849859 100644 --- a/vendor/golang.org/x/mod/modfile/rule.go +++ b/vendor/golang.org/x/mod/modfile/rule.go @@ -125,6 +125,12 @@ func (f *File) AddComment(text string) { type VersionFixer func(path, version string) (string, error) +// errDontFix is returned by a VersionFixer to indicate the version should be +// left alone, even if it's not canonical. +var dontFixRetract VersionFixer = func(_, vers string) (string, error) { + return vers, nil +} + // Parse parses the data, reported in errors as being from file, // into a File struct. It applies fix, if non-nil, to canonicalize all module versions found. func Parse(file string, data []byte, fix VersionFixer) (*File, error) { @@ -142,7 +148,7 @@ func ParseLax(file string, data []byte, fix VersionFixer) (*File, error) { return parseToFile(file, data, fix, false) } -func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (*File, error) { +func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (parsed *File, err error) { fs, err := parse(file, data) if err != nil { return nil, err @@ -150,8 +156,18 @@ func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (*File f := &File{ Syntax: fs, } - var errs ErrorList + + // fix versions in retract directives after the file is parsed. + // We need the module path to fix versions, and it might be at the end. 
+ defer func() { + oldLen := len(errs) + f.fixRetract(fix, &errs) + if len(errs) > oldLen { + parsed, err = nil, errs + } + }() + for _, x := range fs.Stmt { switch x := x.(type) { case *Line: @@ -370,7 +386,7 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a case "retract": rationale := parseRetractRationale(block, line) - vi, err := parseVersionInterval(verb, &args, fix) + vi, err := parseVersionInterval(verb, "", &args, dontFixRetract) if err != nil { if strict { wrapError(err) @@ -397,6 +413,47 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a } } +// fixRetract applies fix to each retract directive in f, appending any errors +// to errs. +// +// Most versions are fixed as we parse the file, but for retract directives, +// the relevant module path is the one specified with the module directive, +// and that might appear at the end of the file (or not at all). +func (f *File) fixRetract(fix VersionFixer, errs *ErrorList) { + if fix == nil { + return + } + path := "" + if f.Module != nil { + path = f.Module.Mod.Path + } + var r *Retract + wrapError := func(err error) { + *errs = append(*errs, Error{ + Filename: f.Syntax.Name, + Pos: r.Syntax.Start, + Err: err, + }) + } + + for _, r = range f.Retract { + if path == "" { + wrapError(errors.New("no module directive found, so retract cannot be used")) + return // only print the first one of these + } + + args := r.Syntax.Token + if args[0] == "retract" { + args = args[1:] + } + vi, err := parseVersionInterval("retract", path, &args, fix) + if err != nil { + wrapError(err) + } + r.VersionInterval = vi + } +} + // isIndirect reports whether line has a "// indirect" comment, // meaning it is in go.mod only for its effect on indirect dependencies, // so that it can be dropped entirely once the effective version of the @@ -491,13 +548,13 @@ func AutoQuote(s string) string { return s } -func parseVersionInterval(verb string, args *[]string, fix VersionFixer) (VersionInterval, error) { +func parseVersionInterval(verb string, path string, args *[]string, fix VersionFixer) (VersionInterval, error) { toks := *args if len(toks) == 0 || toks[0] == "(" { return VersionInterval{}, fmt.Errorf("expected '[' or version") } if toks[0] != "[" { - v, err := parseVersion(verb, "", &toks[0], fix) + v, err := parseVersion(verb, path, &toks[0], fix) if err != nil { return VersionInterval{}, err } @@ -509,7 +566,7 @@ func parseVersionInterval(verb string, args *[]string, fix VersionFixer) (Versio if len(toks) == 0 { return VersionInterval{}, fmt.Errorf("expected version after '['") } - low, err := parseVersion(verb, "", &toks[0], fix) + low, err := parseVersion(verb, path, &toks[0], fix) if err != nil { return VersionInterval{}, err } @@ -523,7 +580,7 @@ func parseVersionInterval(verb string, args *[]string, fix VersionFixer) (Versio if len(toks) == 0 { return VersionInterval{}, fmt.Errorf("expected version after ','") } - high, err := parseVersion(verb, "", &toks[0], fix) + high, err := parseVersion(verb, path, &toks[0], fix) if err != nil { return VersionInterval{}, err } @@ -631,8 +688,7 @@ func parseVersion(verb string, path string, s *string, fix VersionFixer) (string } } if fix != nil { - var err error - t, err = fix(path, t) + fixed, err := fix(path, t) if err != nil { if err, ok := err.(*module.ModuleError); ok { return "", &Error{ @@ -643,19 +699,23 @@ func parseVersion(verb string, path string, s *string, fix VersionFixer) (string } return "", err } + t = fixed + } else { + cv := 
module.CanonicalVersion(t) + if cv == "" { + return "", &Error{ + Verb: verb, + ModPath: path, + Err: &module.InvalidVersionError{ + Version: t, + Err: errors.New("must be of the form v1.2.3"), + }, + } + } + t = cv } - if v := module.CanonicalVersion(t); v != "" { - *s = v - return *s, nil - } - return "", &Error{ - Verb: verb, - ModPath: path, - Err: &module.InvalidVersionError{ - Version: t, - Err: errors.New("must be of the form v1.2.3"), - }, - } + *s = t + return *s, nil } func modulePathMajor(path string) (string, error) { @@ -835,11 +895,8 @@ func (f *File) DropRequire(path string) error { // AddExclude adds a exclude statement to the mod file. Errors if the provided // version is not a canonical version string func (f *File) AddExclude(path, vers string) error { - if !isCanonicalVersion(vers) { - return &module.InvalidVersionError{ - Version: vers, - Err: errors.New("must be of the form v1.2.3"), - } + if err := checkCanonicalVersion(path, vers); err != nil { + return err } var hint *Line @@ -916,17 +973,15 @@ func (f *File) DropReplace(oldPath, oldVers string) error { // AddRetract adds a retract statement to the mod file. Errors if the provided // version interval does not consist of canonical version strings func (f *File) AddRetract(vi VersionInterval, rationale string) error { - if !isCanonicalVersion(vi.High) { - return &module.InvalidVersionError{ - Version: vi.High, - Err: errors.New("must be of the form v1.2.3"), - } + var path string + if f.Module != nil { + path = f.Module.Mod.Path } - if !isCanonicalVersion(vi.Low) { - return &module.InvalidVersionError{ - Version: vi.Low, - Err: errors.New("must be of the form v1.2.3"), - } + if err := checkCanonicalVersion(path, vi.High); err != nil { + return err + } + if err := checkCanonicalVersion(path, vi.Low); err != nil { + return err } r := &Retract{ @@ -1086,8 +1141,40 @@ func lineRetractLess(li, lj *Line) bool { return semver.Compare(vii.High, vij.High) > 0 } -// isCanonicalVersion tests if the provided version string represents a valid -// canonical version. -func isCanonicalVersion(vers string) bool { - return vers != "" && semver.Canonical(vers) == vers +// checkCanonicalVersion returns a non-nil error if vers is not a canonical +// version string or does not match the major version of path. +// +// If path is non-empty, the error text suggests a format with a major version +// corresponding to the path. +func checkCanonicalVersion(path, vers string) error { + _, pathMajor, pathMajorOk := module.SplitPathVersion(path) + + if vers == "" || vers != module.CanonicalVersion(vers) { + if pathMajor == "" { + return &module.InvalidVersionError{ + Version: vers, + Err: fmt.Errorf("must be of the form v1.2.3"), + } + } + return &module.InvalidVersionError{ + Version: vers, + Err: fmt.Errorf("must be of the form %s.2.3", module.PathMajorPrefix(pathMajor)), + } + } + + if pathMajorOk { + if err := module.CheckPathMajor(vers, pathMajor); err != nil { + if pathMajor == "" { + // In this context, the user probably wrote "v2.3.4" when they meant + // "v2.3.4+incompatible". Suggest that instead of "v0 or v1". 
+ return &module.InvalidVersionError{ + Version: vers, + Err: fmt.Errorf("should be %s+incompatible (or module %s/%v)", vers, path, semver.Major(vers)), + } + } + return err + } + } + + return nil } diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go index c1c5263c427..0e03014837c 100644 --- a/vendor/golang.org/x/mod/module/module.go +++ b/vendor/golang.org/x/mod/module/module.go @@ -224,12 +224,16 @@ func firstPathOK(r rune) bool { 'a' <= r && r <= 'z' } -// pathOK reports whether r can appear in an import path element. +// modPathOK reports whether r can appear in a module path element. // Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~. -// This matches what "go get" has historically recognized in import paths. +// +// This matches what "go get" has historically recognized in import paths, +// and avoids confusing sequences like '%20' or '+' that would change meaning +// if used in a URL. +// // TODO(rsc): We would like to allow Unicode letters, but that requires additional // care in the safe encoding (see "escaped paths" above). -func pathOK(r rune) bool { +func modPathOK(r rune) bool { if r < utf8.RuneSelf { return r == '-' || r == '.' || r == '_' || r == '~' || '0' <= r && r <= '9' || @@ -239,6 +243,17 @@ func pathOK(r rune) bool { return false } +// modPathOK reports whether r can appear in a package import path element. +// +// Import paths are intermediate between module paths and file paths: we allow +// disallow characters that would be confusing or ambiguous as arguments to +// 'go get' (such as '@' and ' ' ), but allow certain characters that are +// otherwise-unambiguous on the command line and historically used for some +// binary names (such as '++' as a suffix for compiler binaries and wrappers). +func importPathOK(r rune) bool { + return modPathOK(r) || r == '+' +} + // fileNameOK reports whether r can appear in a file name. // For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters. // If we expand the set of allowed characters here, we have to @@ -270,7 +285,7 @@ func fileNameOK(r rune) bool { // CheckPath checks that a module path is valid. // A valid module path is a valid import path, as checked by CheckImportPath, -// with two additional constraints. +// with three additional constraints. // First, the leading path element (up to the first slash, if any), // by convention a domain name, must contain only lower-case ASCII letters, // ASCII digits, dots (U+002E), and dashes (U+002D); @@ -280,8 +295,9 @@ func fileNameOK(r rune) bool { // and must not contain any dots. For paths beginning with "gopkg.in/", // this second requirement is replaced by a requirement that the path // follow the gopkg.in server's conventions. +// Third, no path element may begin with a dot. func CheckPath(path string) error { - if err := checkPath(path, false); err != nil { + if err := checkPath(path, modulePath); err != nil { return fmt.Errorf("malformed module path %q: %v", path, err) } i := strings.Index(path, "/") @@ -315,7 +331,7 @@ func CheckPath(path string) error { // // A valid path element is a non-empty string made up of // ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~. -// It must not begin or end with a dot (U+002E), nor contain two dots in a row. +// It must not end with a dot (U+002E), nor contain two dots in a row. 
// // The element prefix up to the first dot must not be a reserved file name // on Windows, regardless of case (CON, com1, NuL, and so on). The element @@ -326,19 +342,29 @@ func CheckPath(path string) error { // top-level package documentation for additional information about // subtleties of Unicode. func CheckImportPath(path string) error { - if err := checkPath(path, false); err != nil { + if err := checkPath(path, importPath); err != nil { return fmt.Errorf("malformed import path %q: %v", path, err) } return nil } +// pathKind indicates what kind of path we're checking. Module paths, +// import paths, and file paths have different restrictions. +type pathKind int + +const ( + modulePath pathKind = iota + importPath + filePath +) + // checkPath checks that a general path is valid. // It returns an error describing why but not mentioning path. // Because these checks apply to both module paths and import paths, // the caller is expected to add the "malformed ___ path %q: " prefix. // fileName indicates whether the final element of the path is a file name // (as opposed to a directory name). -func checkPath(path string, fileName bool) error { +func checkPath(path string, kind pathKind) error { if !utf8.ValidString(path) { return fmt.Errorf("invalid UTF-8") } @@ -357,39 +383,45 @@ func checkPath(path string, fileName bool) error { elemStart := 0 for i, r := range path { if r == '/' { - if err := checkElem(path[elemStart:i], fileName); err != nil { + if err := checkElem(path[elemStart:i], kind); err != nil { return err } elemStart = i + 1 } } - if err := checkElem(path[elemStart:], fileName); err != nil { + if err := checkElem(path[elemStart:], kind); err != nil { return err } return nil } // checkElem checks whether an individual path element is valid. -// fileName indicates whether the element is a file name (not a directory name). -func checkElem(elem string, fileName bool) error { +func checkElem(elem string, kind pathKind) error { if elem == "" { return fmt.Errorf("empty path element") } if strings.Count(elem, ".") == len(elem) { return fmt.Errorf("invalid path element %q", elem) } - if elem[0] == '.' && !fileName { + if elem[0] == '.' && kind == modulePath { return fmt.Errorf("leading dot in path element") } if elem[len(elem)-1] == '.' { return fmt.Errorf("trailing dot in path element") } - charOK := pathOK - if fileName { - charOK = fileNameOK - } for _, r := range elem { - if !charOK(r) { + ok := false + switch kind { + case modulePath: + ok = modPathOK(r) + case importPath: + ok = importPathOK(r) + case filePath: + ok = fileNameOK(r) + default: + panic(fmt.Sprintf("internal error: invalid kind %v", kind)) + } + if !ok { return fmt.Errorf("invalid char %q", r) } } @@ -406,7 +438,7 @@ func checkElem(elem string, fileName bool) error { } } - if fileName { + if kind == filePath { // don't check for Windows short-names in file names. They're // only an issue for import paths. return nil @@ -444,7 +476,7 @@ func checkElem(elem string, fileName bool) error { // top-level package documentation for additional information about // subtleties of Unicode. func CheckFilePath(path string) error { - if err := checkPath(path, true); err != nil { + if err := checkPath(path, filePath); err != nil { return fmt.Errorf("malformed file path %q: %v", path, err) } return nil @@ -647,7 +679,7 @@ func EscapePath(path string) (escaped string, err error) { // Versions are allowed to be in non-semver form but must be valid file names // and not contain exclamation marks. 
func EscapeVersion(v string) (escaped string, err error) { - if err := checkElem(v, true); err != nil || strings.Contains(v, "!") { + if err := checkElem(v, filePath); err != nil || strings.Contains(v, "!") { return "", &InvalidVersionError{ Version: v, Err: fmt.Errorf("disallowed version string"), @@ -706,7 +738,7 @@ func UnescapeVersion(escaped string) (v string, err error) { if !ok { return "", fmt.Errorf("invalid escaped version %q", escaped) } - if err := checkElem(v, true); err != nil { + if err := checkElem(v, filePath); err != nil { return "", fmt.Errorf("invalid escaped version %q: %v", v, err) } return v, nil diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go index d20f52b7de9..344bd143345 100644 --- a/vendor/golang.org/x/net/context/go17.go +++ b/vendor/golang.org/x/net/context/go17.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.7 // +build go1.7 package context diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go index d88bd1db127..64d31ecc3ef 100644 --- a/vendor/golang.org/x/net/context/go19.go +++ b/vendor/golang.org/x/net/context/go19.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.9 // +build go1.9 package context diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go index 0f35592df51..5270db5db7d 100644 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.7 // +build !go1.7 package context diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go index b105f80be4f..1f9715341fa 100644 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ b/vendor/golang.org/x/net/context/pre_go19.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.9 // +build !go1.9 package context diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile index 53fc5257974..8512245952b 100644 --- a/vendor/golang.org/x/net/http2/Dockerfile +++ b/vendor/golang.org/x/net/http2/Dockerfile @@ -38,7 +38,7 @@ RUN make RUN make install WORKDIR /root -RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz +RUN wget https://curl.se/download/curl-7.45.0.tar.gz RUN tar -zxvf curl-7.45.0.tar.gz WORKDIR /root/curl-7.45.0 RUN ./configure --with-ssl --with-nghttp2=/usr/local diff --git a/vendor/golang.org/x/net/http2/go111.go b/vendor/golang.org/x/net/http2/go111.go index 3a131016b2c..5bf62b032ec 100644 --- a/vendor/golang.org/x/net/http2/go111.go +++ b/vendor/golang.org/x/net/http2/go111.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.11 // +build go1.11 package http2 diff --git a/vendor/golang.org/x/net/http2/not_go111.go b/vendor/golang.org/x/net/http2/not_go111.go index 161bca7ce85..cc0baa8197f 100644 --- a/vendor/golang.org/x/net/http2/not_go111.go +++ b/vendor/golang.org/x/net/http2/not_go111.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build !go1.11 // +build !go1.11 package http2 diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 2aa859f76f9..e125bbd2a26 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -1293,7 +1293,9 @@ func (sc *serverConn) startGracefulShutdown() { sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) }) } -// After sending GOAWAY, the connection will close after goAwayTimeout. +// After sending GOAWAY with an error code (non-graceful shutdown), the +// connection will close after goAwayTimeout. +// // If we close the connection immediately after sending GOAWAY, there may // be unsent data in our kernel receive buffer, which will cause the kernel // to send a TCP RST on close() instead of a FIN. This RST will abort the @@ -1629,23 +1631,37 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { func (sc *serverConn) processData(f *DataFrame) error { sc.serveG.check() - if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + id := f.Header().StreamID + if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || id > sc.maxClientStreamID) { + // Discard all DATA frames if the GOAWAY is due to an + // error, or: + // + // Section 6.8: After sending a GOAWAY frame, the sender + // can discard frames for streams initiated by the + // receiver with identifiers higher than the identified + // last stream. return nil } - data := f.Data() - // "If a DATA frame is received whose stream is not in "open" - // or "half closed (local)" state, the recipient MUST respond - // with a stream error (Section 5.4.2) of type STREAM_CLOSED." - id := f.Header().StreamID + data := f.Data() state, st := sc.state(id) if id == 0 || state == stateIdle { + // Section 6.1: "DATA frames MUST be associated with a + // stream. If a DATA frame is received whose stream + // identifier field is 0x0, the recipient MUST respond + // with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR." + // // Section 5.1: "Receiving any frame other than HEADERS // or PRIORITY on a stream in this state MUST be // treated as a connection error (Section 5.4.1) of // type PROTOCOL_ERROR." return ConnectionError(ErrCodeProtocol) } + + // "If a DATA frame is received whose stream is not in "open" + // or "half closed (local)" state, the recipient MUST respond + // with a stream error (Section 5.4.2) of type STREAM_CLOSED." if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { // This includes sending a RST_STREAM if the stream is // in stateHalfClosedLocal (which currently means that diff --git a/vendor/golang.org/x/net/idna/idna10.0.0.go b/vendor/golang.org/x/net/idna/idna10.0.0.go index a98a31f4038..7e69ee1b22e 100644 --- a/vendor/golang.org/x/net/idna/idna10.0.0.go +++ b/vendor/golang.org/x/net/idna/idna10.0.0.go @@ -4,6 +4,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.10 // +build go1.10 // Package idna implements IDNA2008 using the compatibility processing diff --git a/vendor/golang.org/x/net/idna/idna9.0.0.go b/vendor/golang.org/x/net/idna/idna9.0.0.go index 8842146b5d9..7c7456374c1 100644 --- a/vendor/golang.org/x/net/idna/idna9.0.0.go +++ b/vendor/golang.org/x/net/idna/idna9.0.0.go @@ -4,6 +4,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build !go1.10 // +build !go1.10 // Package idna implements IDNA2008 using the compatibility processing diff --git a/vendor/golang.org/x/net/idna/tables10.0.0.go b/vendor/golang.org/x/net/idna/tables10.0.0.go index 54fddb4b16c..d1d62ef459b 100644 --- a/vendor/golang.org/x/net/idna/tables10.0.0.go +++ b/vendor/golang.org/x/net/idna/tables10.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.10 && !go1.13 // +build go1.10,!go1.13 package idna diff --git a/vendor/golang.org/x/net/idna/tables11.0.0.go b/vendor/golang.org/x/net/idna/tables11.0.0.go index 8ce0811fdf3..167efba7125 100644 --- a/vendor/golang.org/x/net/idna/tables11.0.0.go +++ b/vendor/golang.org/x/net/idna/tables11.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.13 && !go1.14 // +build go1.13,!go1.14 package idna diff --git a/vendor/golang.org/x/net/idna/tables12.0.0.go b/vendor/golang.org/x/net/idna/tables12.0.0.go index f39f0cb4cd8..ab40f7bcc3b 100644 --- a/vendor/golang.org/x/net/idna/tables12.0.0.go +++ b/vendor/golang.org/x/net/idna/tables12.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.14 && !go1.16 // +build go1.14,!go1.16 package idna diff --git a/vendor/golang.org/x/net/idna/tables13.0.0.go b/vendor/golang.org/x/net/idna/tables13.0.0.go index e8c7a36d7a7..390c5e56d2a 100644 --- a/vendor/golang.org/x/net/idna/tables13.0.0.go +++ b/vendor/golang.org/x/net/idna/tables13.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.16 // +build go1.16 package idna diff --git a/vendor/golang.org/x/net/idna/tables9.0.0.go b/vendor/golang.org/x/net/idna/tables9.0.0.go index 8b65fa16783..4074b5332e3 100644 --- a/vendor/golang.org/x/net/idna/tables9.0.0.go +++ b/vendor/golang.org/x/net/idna/tables9.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build !go1.10 // +build !go1.10 package idna diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go index 83dacac320a..16c6c6b90ce 100644 --- a/vendor/golang.org/x/oauth2/google/appengine_gen1.go +++ b/vendor/golang.org/x/oauth2/google/appengine_gen1.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build appengine // +build appengine // This file applies to App Engine first generation runtimes (<= Go 1.9). diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go index 04c2c2216af..a7e27b3d299 100644 --- a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go +++ b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !appengine // +build !appengine // This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible. diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go index ad2c09236c5..ae391313d2b 100644 --- a/vendor/golang.org/x/oauth2/google/default.go +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -21,6 +21,10 @@ import ( // Credentials holds Google credentials, including "Application Default Credentials". 
// For more details, see: // https://developers.google.com/accounts/docs/application-default-credentials +// Credentials from external accounts (workload identity federation) are used to +// identify a particular application from an on-prem or non-Google Cloud platform +// including Amazon Web Services (AWS), Microsoft Azure or any identity provider +// that supports OpenID Connect (OIDC). type Credentials struct { ProjectID string // may be empty TokenSource oauth2.TokenSource @@ -65,6 +69,10 @@ func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSourc // // 1. A JSON file whose path is specified by the // GOOGLE_APPLICATION_CREDENTIALS environment variable. +// For workload identity federation, refer to +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation on +// how to generate the JSON configuration file for on-prem/non-Google cloud +// platforms. // 2. A JSON file in a location known to the gcloud command-line tool. // On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. // On other systems, $HOME/.config/gcloud/application_default_credentials.json. @@ -119,8 +127,10 @@ func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials // CredentialsFromJSON obtains Google credentials from a JSON value. The JSON can // represent either a Google Developers Console client_credentials.json file (as in -// ConfigFromJSON) or a Google Developers service account key file (as in -// JWTConfigFromJSON). +// ConfigFromJSON), a Google Developers service account key file (as in +// JWTConfigFromJSON) or the JSON configuration file for workload identity federation +// in non-Google cloud platforms (see +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation). func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) { var f credentialsFile if err := json.Unmarshal(jsonData, &f); err != nil { diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go index 73be629033d..b241c728a6b 100644 --- a/vendor/golang.org/x/oauth2/google/doc.go +++ b/vendor/golang.org/x/oauth2/google/doc.go @@ -4,13 +4,16 @@ // Package google provides support for making OAuth2 authorized and authenticated // HTTP requests to Google APIs. It supports the Web server flow, client-side -// credentials, service accounts, Google Compute Engine service accounts, and Google -// App Engine service accounts. +// credentials, service accounts, Google Compute Engine service accounts, Google +// App Engine service accounts and workload identity federation from non-Google +// cloud platforms. // // A brief overview of the package follows. For more information, please read // https://developers.google.com/accounts/docs/OAuth2 // and // https://developers.google.com/accounts/docs/application-default-credentials. +// For more information on using workload identity federation, refer to +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation. // // OAuth2 Configs // @@ -19,6 +22,35 @@ // the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or // create an http.Client. // +// Workload Identity Federation +// +// Using workload identity federation, your application can access Google Cloud +// resources from Amazon Web Services (AWS), Microsoft Azure or any identity +// provider that supports OpenID Connect (OIDC). 
+// Traditionally, applications running outside Google Cloud have used service +// account keys to access Google Cloud resources. Using identity federation, +// you can allow your workload to impersonate a service account. +// This lets you access Google Cloud resources directly, eliminating the +// maintenance and security burden associated with service account keys. +// +// Follow the detailed instructions on how to configure Workload Identity Federation +// in various platforms: +// +// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/access-resources-aws +// Microsoft Azure: https://cloud.google.com/iam/docs/access-resources-azure +// OIDC identity provider: https://cloud.google.com/iam/docs/access-resources-oidc +// +// For OIDC providers, the library can retrieve OIDC tokens either from a +// local file location (file-sourced credentials) or from a local server +// (URL-sourced credentials). +// For file-sourced credentials, a background process needs to be continuously +// refreshing the file location with a new OIDC token prior to expiration. +// For tokens with one hour lifetimes, the token needs to be updated in the file +// every hour. The token can be stored directly as plain text or in JSON format. +// For URL-sourced credentials, a local server needs to host a GET endpoint to +// return the OIDC token. The response can be in plain text or JSON. +// Additional required request headers can also be specified. +// // // Credentials // @@ -29,6 +61,13 @@ // FindDefaultCredentials looks in some well-known places for a credentials file, and // will call AppEngineTokenSource or ComputeTokenSource as needed. // +// Application Default Credentials also support workload identity federation to +// access Google Cloud resources from non-Google Cloud platforms including Amazon +// Web Services (AWS), Microsoft Azure or any identity provider that supports +// OpenID Connect (OIDC). Workload identity federation is recommended for +// non-Google Cloud environments as it avoids the need to download, manage and +// store service account private keys locally. +// // DefaultClient and DefaultTokenSource are convenience methods. They first call FindDefaultCredentials, // then use the credentials to construct an http.Client or an oauth2.TokenSource. 
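The doc.go text above says that FindDefaultCredentials (and the DefaultClient/DefaultTokenSource helpers) now also resolve workload identity federation credentials. As a rough consumer-side sketch, not part of this patch: the application keeps calling the same entry point, and an external-account JSON file pointed to by GOOGLE_APPLICATION_CREDENTIALS is picked up automatically. The cloud-platform scope below is only an example.

    package main

    import (
        "context"
        "fmt"
        "log"

        "golang.org/x/oauth2/google"
    )

    func main() {
        ctx := context.Background()

        // FindDefaultCredentials honours GOOGLE_APPLICATION_CREDENTIALS, which may now
        // point at a workload identity federation (external account) JSON file as well
        // as a service account key or a gcloud ADC file.
        creds, err := google.FindDefaultCredentials(ctx, "https://www.googleapis.com/auth/cloud-platform")
        if err != nil {
            log.Fatalf("finding default credentials: %v", err)
        }

        tok, err := creds.TokenSource.Token()
        if err != nil {
            log.Fatalf("fetching token: %v", err)
        }
        fmt.Println("token type:", tok.TokenType)
    }
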
// diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go index 2f078f73a5a..fbcefb474e3 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go @@ -5,6 +5,7 @@ package externalaccount import ( + "bytes" "context" "crypto/hmac" "crypto/sha256" @@ -127,7 +128,7 @@ func canonicalHeaders(req *http.Request) (string, string) { } sort.Strings(headers) - var fullHeaders strings.Builder + var fullHeaders bytes.Buffer for _, header := range headers { headerValue := strings.Join(lowerCaseHeaders[header], ",") fullHeaders.WriteString(header) diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go index 2eb5c8e248c..1a6e93cec7a 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go @@ -14,7 +14,9 @@ import ( ) // now aliases time.Now for testing -var now = time.Now +var now = func() time.Time { + return time.Now().UTC() +} // Config stores the configuration for fetching tokens with external credentials. type Config struct { @@ -45,7 +47,7 @@ func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { ctx: ctx, url: c.ServiceAccountImpersonationURL, scopes: scopes, - ts: oauth2.ReuseTokenSource(nil, ts), + ts: oauth2.ReuseTokenSource(nil, ts), } return oauth2.ReuseTokenSource(nil, imp) } @@ -124,7 +126,7 @@ func (ts tokenSource) Token() (*oauth2.Token, error) { if err != nil { return nil, err } - stsRequest := STSTokenExchangeRequest{ + stsRequest := stsTokenExchangeRequest{ GrantType: "urn:ietf:params:oauth:grant-type:token-exchange", Audience: conf.Audience, Scope: conf.Scopes, @@ -134,12 +136,12 @@ func (ts tokenSource) Token() (*oauth2.Token, error) { } header := make(http.Header) header.Add("Content-Type", "application/x-www-form-urlencoded") - clientAuth := ClientAuthentication{ + clientAuth := clientAuthentication{ AuthStyle: oauth2.AuthStyleInHeader, ClientID: conf.ClientID, ClientSecret: conf.ClientSecret, } - stsResp, err := ExchangeToken(ts.ctx, conf.TokenURL, &stsRequest, clientAuth, header, nil) + stsResp, err := exchangeToken(ts.ctx, conf.TokenURL, &stsRequest, clientAuth, header, nil) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go index 0464724fd8c..feccf8b68ef 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go @@ -11,15 +11,15 @@ import ( "net/url" ) -// ClientAuthentication represents an OAuth client ID and secret and the mechanism for passing these credentials as stated in rfc6749#2.3.1. -type ClientAuthentication struct { +// clientAuthentication represents an OAuth client ID and secret and the mechanism for passing these credentials as stated in rfc6749#2.3.1. 
+type clientAuthentication struct { // AuthStyle can be either basic or request-body AuthStyle oauth2.AuthStyle ClientID string ClientSecret string } -func (c *ClientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { +func (c *clientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { if c.ClientID == "" || c.ClientSecret == "" || values == nil || headers == nil { return } diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go index fbb477d10e9..a8a704bb411 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go @@ -18,11 +18,11 @@ import ( "golang.org/x/oauth2" ) -// ExchangeToken performs an oauth2 token exchange with the provided endpoint. +// exchangeToken performs an oauth2 token exchange with the provided endpoint. // The first 4 fields are all mandatory. headers can be used to pass additional // headers beyond the bare minimum required by the token exchange. options can // be used to pass additional JSON-structured options to the remote server. -func ExchangeToken(ctx context.Context, endpoint string, request *STSTokenExchangeRequest, authentication ClientAuthentication, headers http.Header, options map[string]interface{}) (*STSTokenExchangeResponse, error) { +func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchangeRequest, authentication clientAuthentication, headers http.Header, options map[string]interface{}) (*stsTokenExchangeResponse, error) { client := oauth2.NewClient(ctx, nil) @@ -68,7 +68,7 @@ func ExchangeToken(ctx context.Context, endpoint string, request *STSTokenExchan if c := resp.StatusCode; c < 200 || c > 299 { return nil, fmt.Errorf("oauth2/google: status code %d: %s", c, body) } - var stsResp STSTokenExchangeResponse + var stsResp stsTokenExchangeResponse err = json.Unmarshal(body, &stsResp) if err != nil { return nil, fmt.Errorf("oauth2/google: failed to unmarshal response body from Secure Token Server: %v", err) @@ -78,8 +78,8 @@ func ExchangeToken(ctx context.Context, endpoint string, request *STSTokenExchan return &stsResp, nil } -// STSTokenExchangeRequest contains fields necessary to make an oauth2 token exchange. -type STSTokenExchangeRequest struct { +// stsTokenExchangeRequest contains fields necessary to make an oauth2 token exchange. +type stsTokenExchangeRequest struct { ActingParty struct { ActorToken string ActorTokenType string @@ -93,8 +93,8 @@ type STSTokenExchangeRequest struct { SubjectTokenType string } -// STSTokenExchangeResponse is used to decode the remote server response during an oauth2 token exchange. -type STSTokenExchangeResponse struct { +// stsTokenExchangeResponse is used to decode the remote server response during an oauth2 token exchange. +type stsTokenExchangeResponse struct { AccessToken string `json:"access_token"` IssuedTokenType string `json:"issued_token_type"` TokenType string `json:"token_type"` diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go index 7434871880a..e1755d1d9ac 100644 --- a/vendor/golang.org/x/oauth2/internal/client_appengine.go +++ b/vendor/golang.org/x/oauth2/internal/client_appengine.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build appengine // +build appengine package internal diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix.go b/vendor/golang.org/x/sys/cpu/cpu_aix.go index 464a209cf59..28b521643b1 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_aix.go +++ b/vendor/golang.org/x/sys/cpu/cpu_aix.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix // +build aix package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go index 7f7f272a014..ccf542a73da 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc // +build gc package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go index 75a95566161..0af2f248412 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc // +build gc package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go index 4adb89cf9cc..fa7cdb9bcd5 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (386 || amd64 || amd64p32) && gc // +build 386 amd64 amd64p32 // +build gc diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go index 53ca8d65c37..2aff3189116 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gccgo // +build gccgo package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go index aa986f77825..4bfbda61993 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gccgo // +build gccgo package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go index ba49b91bd39..8478a6d5979 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (386 || amd64 || amd64p32) && gccgo // +build 386 amd64 amd64p32 // +build gccgo diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go index 6fc874f7fef..159a686f6f7 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build !386 && !amd64 && !amd64p32 && !arm64 // +build !386,!amd64,!amd64p32,!arm64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go index 5a418900538..6000db4cdd1 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && (mips64 || mips64le) // +build linux // +build mips64 mips64le diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go index 42b5d33cb69..f4992b1a593 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x // +build linux,!arm,!arm64,!mips64,!mips64le,!ppc64,!ppc64le,!s390x package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go index 99f8a6399ef..021356d6deb 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && (ppc64 || ppc64le) // +build linux // +build ppc64 ppc64le diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go index 57b5b677de0..f4063c66423 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build mips64 || mips64le // +build mips64 mips64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go index cfc1946b7bb..07c4e36d8f5 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go +++ b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build mips || mipsle // +build mips mipsle package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go index b412efc1bd1..d7b4fb4ccc2 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !linux && arm // +build !linux,arm package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go index 16c1c4090ee..f8c484f589f 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !linux,!netbsd -// +build arm64 +//go:build !linux && !netbsd && arm64 +// +build !linux,!netbsd,arm64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go index f49fad67783..0dafe9644a5 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !linux && (mips64 || mips64le) // +build !linux // +build mips64 mips64le diff --git a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go index d28d675b5f1..4e8acd16583 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build ppc64 || ppc64le // +build ppc64 ppc64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go index 8b08de341b8..bd6c128af9b 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build riscv64 // +build riscv64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go index 5382f2a227a..7747d888a69 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_wasm.go +++ b/vendor/golang.org/x/sys/cpu/cpu_wasm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build wasm // +build wasm package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index 48d42933195..fd380c0a713 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build 386 || amd64 || amd64p32 // +build 386 amd64 amd64p32 package cpu diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go index 76fbe40b762..a864f24d758 100644 --- a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go @@ -8,8 +8,8 @@ // Morever, this file will be used during the building of // gccgo's libgo and thus must not used a CGo method. -// +build aix -// +build gccgo +//go:build aix && gccgo +// +build aix,gccgo package cpu diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go index 5b427d67e2f..904be42ffdc 100644 --- a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go @@ -6,8 +6,8 @@ // system call on AIX without depending on x/sys/unix. // (See golang.org/issue/32102) -// +build aix,ppc64 -// +build gc +//go:build aix && ppc64 && gc +// +build aix,ppc64,gc package cpu diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go index 951fce4d0d9..abc89c104a8 100644 --- a/vendor/golang.org/x/sys/unix/aliases.go +++ b/vendor/golang.org/x/sys/unix/aliases.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9 +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // +build go1.9 package unix diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_386.s b/vendor/golang.org/x/sys/unix/asm_bsd_386.s similarity index 70% rename from vendor/golang.org/x/sys/unix/asm_netbsd_386.s rename to vendor/golang.org/x/sys/unix/asm_bsd_386.s index ae7b498d506..7f29275fa00 100644 --- a/vendor/golang.org/x/sys/unix/asm_netbsd_386.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_386.s @@ -1,14 +1,14 @@ -// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (darwin || freebsd || netbsd || openbsd) && gc +// +build darwin freebsd netbsd openbsd // +build gc #include "textflag.h" -// -// System call support for 386, NetBSD -// +// System call support for 386 BSD // Just jump to package syscall's implementation for all these functions. // The runtime may know about them. @@ -22,7 +22,7 @@ TEXT ·Syscall6(SB),NOSPLIT,$0-40 TEXT ·Syscall9(SB),NOSPLIT,$0-52 JMP syscall·Syscall9(SB) -TEXT ·RawSyscall(SB),NOSPLIT,$0-28 +TEXT ·RawSyscall(SB),NOSPLIT,$0-28 JMP syscall·RawSyscall(SB) TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s similarity index 72% rename from vendor/golang.org/x/sys/unix/asm_darwin_amd64.s rename to vendor/golang.org/x/sys/unix/asm_bsd_amd64.s index f2397fde554..2b99c349a2d 100644 --- a/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s @@ -1,14 +1,14 @@ -// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && gc +// +build darwin dragonfly freebsd netbsd openbsd // +build gc #include "textflag.h" -// -// System call support for AMD64, Darwin -// +// System call support for AMD64 BSD // Just jump to package syscall's implementation for all these functions. // The runtime may know about them. diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_arm.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s similarity index 74% rename from vendor/golang.org/x/sys/unix/asm_darwin_arm.s rename to vendor/golang.org/x/sys/unix/asm_bsd_arm.s index c9e6b6fc8b5..98ebfad9d51 100644 --- a/vendor/golang.org/x/sys/unix/asm_darwin_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s @@ -1,15 +1,14 @@ -// Copyright 2015 The Go Authors. All rights reserved. +// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (darwin || freebsd || netbsd || openbsd) && gc +// +build darwin freebsd netbsd openbsd // +build gc -// +build arm,darwin #include "textflag.h" -// -// System call support for ARM, Darwin -// +// System call support for ARM BSD // Just jump to package syscall's implementation for all these functions. // The runtime may know about them. 
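Most of the x/net, x/oauth2, and x/sys churn in this region is the mechanical move to the newer //go:build constraint syntax, with the legacy // +build lines kept alongside for older toolchains, plus the consolidation of the per-OS BSD assembly stubs into shared asm_bsd_*.s files. A minimal sketch of the paired form, as it would appear at the top of a Go source file (the platform list mirrors the asm_bsd_386.s header above and is only illustrative):

    //go:build (darwin || freebsd || netbsd || openbsd) && gc
    // +build darwin freebsd netbsd openbsd
    // +build gc

    package unix

The single //go:build expression is the AND of the stacked // +build lines; within a legacy line, space-separated terms are ORed.
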
diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s similarity index 75% rename from vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s rename to vendor/golang.org/x/sys/unix/asm_bsd_arm64.s index e57367c17aa..fe36a7391a6 100644 --- a/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s @@ -1,14 +1,14 @@ -// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (darwin || freebsd || netbsd || openbsd) && gc +// +build darwin freebsd netbsd openbsd // +build gc #include "textflag.h" -// -// System call support for AMD64, NetBSD -// +// System call support for ARM64 BSD // Just jump to package syscall's implementation for all these functions. // The runtime may know about them. diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_386.s b/vendor/golang.org/x/sys/unix/asm_darwin_386.s deleted file mode 100644 index 8a06b87d715..00000000000 --- a/vendor/golang.org/x/sys/unix/asm_darwin_386.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for 386, Darwin -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-28 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-40 - JMP syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-52 - JMP syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-28 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 - JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s b/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s deleted file mode 100644 index 89843f8f4b2..00000000000 --- a/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc -// +build arm64,darwin - -#include "textflag.h" - -// -// System call support for AMD64, Darwin -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-56 - B syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-80 - B syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-104 - B syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 - B syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 - B syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s b/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s deleted file mode 100644 index 27674e1cafd..00000000000 --- a/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for AMD64, DragonFly -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. 
- -TEXT ·Syscall(SB),NOSPLIT,$0-56 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-80 - JMP syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-104 - JMP syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 - JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_386.s b/vendor/golang.org/x/sys/unix/asm_freebsd_386.s deleted file mode 100644 index 49f0ac2364c..00000000000 --- a/vendor/golang.org/x/sys/unix/asm_freebsd_386.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for 386, FreeBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-28 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-40 - JMP syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-52 - JMP syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-28 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 - JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s deleted file mode 100644 index f2dfc57b836..00000000000 --- a/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for AMD64, FreeBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-56 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-80 - JMP syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-104 - JMP syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 - JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s b/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s deleted file mode 100644 index 6d740db2c0c..00000000000 --- a/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for ARM, FreeBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-28 - B syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-40 - B syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-52 - B syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-28 - B syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 - B syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s deleted file mode 100644 index a8f5a29b35f..00000000000 --- a/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for ARM64, FreeBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-56 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-80 - JMP syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-104 - JMP syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 - JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s b/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s deleted file mode 100644 index d7da175e1a3..00000000000 --- a/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for ARM, NetBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-28 - B syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-40 - B syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-52 - B syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-28 - B syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 - B syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s deleted file mode 100644 index e7cbe1904c4..00000000000 --- a/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for ARM64, NetBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-56 - B syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-80 - B syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-104 - B syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 - B syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 - B syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_386.s b/vendor/golang.org/x/sys/unix/asm_openbsd_386.s deleted file mode 100644 index 2f00b0310f4..00000000000 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_386.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for 386, OpenBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. 
- -TEXT ·Syscall(SB),NOSPLIT,$0-28 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-40 - JMP syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-52 - JMP syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-28 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 - JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s deleted file mode 100644 index 07632c99cea..00000000000 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for AMD64, OpenBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-56 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-80 - JMP syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-104 - JMP syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 - JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s b/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s deleted file mode 100644 index 73e997320fc..00000000000 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for ARM, OpenBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-28 - B syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-40 - B syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-52 - B syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-28 - B syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 - B syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s deleted file mode 100644 index c47302aa46d..00000000000 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#include "textflag.h" - -// -// System call support for arm64, OpenBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. - -TEXT ·Syscall(SB),NOSPLIT,$0-56 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-80 - JMP syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-104 - JMP syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 - JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s new file mode 100644 index 00000000000..3b54e185813 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s @@ -0,0 +1,426 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build zos && s390x && gc +// +build zos +// +build s390x +// +build gc + +#include "textflag.h" + +#define PSALAA 1208(R0) +#define GTAB64(x) 80(x) +#define LCA64(x) 88(x) +#define CAA(x) 8(x) +#define EDCHPXV(x) 1016(x) // in the CAA +#define SAVSTACK_ASYNC(x) 336(x) // in the LCA + +// SS_*, where x=SAVSTACK_ASYNC +#define SS_LE(x) 0(x) +#define SS_GO(x) 8(x) +#define SS_ERRNO(x) 16(x) +#define SS_ERRNOJR(x) 20(x) + +#define LE_CALL BYTE $0x0D; BYTE $0x76; // BL R7, R6 + +TEXT ·clearErrno(SB),NOSPLIT,$0-0 + BL addrerrno<>(SB) + MOVD $0, 0(R3) + RET + +// Returns the address of errno in R3. +TEXT addrerrno<>(SB),NOSPLIT|NOFRAME,$0-0 + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get __errno FuncDesc. + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + ADD $(0x156*16), R9 + LMG 0(R9), R5, R6 + + // Switch to saved LE stack. + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) + + // Call __errno function. + LE_CALL + NOPH + + // Switch back to Go stack. + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. + RET + +TEXT ·syscall_syscall(SB),NOSPLIT,$0-56 + BL runtime·entersyscall(SB) + MOVD a1+8(FP), R1 + MOVD a2+16(FP), R2 + MOVD a3+24(FP), R3 + + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get function. + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + MOVD trap+0(FP), R5 + SLD $4, R5 + ADD R5, R9 + LMG 0(R9), R5, R6 + + // Restore LE stack. + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) + + // Call function. + LE_CALL + NOPH + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. + + MOVD R3, r1+32(FP) + MOVD R0, r2+40(FP) + MOVD R0, err+48(FP) + MOVW R3, R4 + CMP R4, $-1 + BNE done + BL addrerrno<>(SB) + MOVWZ 0(R3), R3 + MOVD R3, err+48(FP) +done: + BL runtime·exitsyscall(SB) + RET + +TEXT ·syscall_rawsyscall(SB),NOSPLIT,$0-56 + MOVD a1+8(FP), R1 + MOVD a2+16(FP), R2 + MOVD a3+24(FP), R3 + + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get function. + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + MOVD trap+0(FP), R5 + SLD $4, R5 + ADD R5, R9 + LMG 0(R9), R5, R6 + + // Restore LE stack. + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) + + // Call function. + LE_CALL + NOPH + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. + + MOVD R3, r1+32(FP) + MOVD R0, r2+40(FP) + MOVD R0, err+48(FP) + MOVW R3, R4 + CMP R4, $-1 + BNE done + BL addrerrno<>(SB) + MOVWZ 0(R3), R3 + MOVD R3, err+48(FP) +done: + RET + +TEXT ·syscall_syscall6(SB),NOSPLIT,$0-80 + BL runtime·entersyscall(SB) + MOVD a1+8(FP), R1 + MOVD a2+16(FP), R2 + MOVD a3+24(FP), R3 + + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get function. + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + MOVD trap+0(FP), R5 + SLD $4, R5 + ADD R5, R9 + LMG 0(R9), R5, R6 + + // Restore LE stack. + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) + + // Fill in parameter list. + MOVD a4+32(FP), R12 + MOVD R12, (2176+24)(R4) + MOVD a5+40(FP), R12 + MOVD R12, (2176+32)(R4) + MOVD a6+48(FP), R12 + MOVD R12, (2176+40)(R4) + + // Call function. + LE_CALL + NOPH + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. 
+ + MOVD R3, r1+56(FP) + MOVD R0, r2+64(FP) + MOVD R0, err+72(FP) + MOVW R3, R4 + CMP R4, $-1 + BNE done + BL addrerrno<>(SB) + MOVWZ 0(R3), R3 + MOVD R3, err+72(FP) +done: + BL runtime·exitsyscall(SB) + RET + +TEXT ·syscall_rawsyscall6(SB),NOSPLIT,$0-80 + MOVD a1+8(FP), R1 + MOVD a2+16(FP), R2 + MOVD a3+24(FP), R3 + + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get function. + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + MOVD trap+0(FP), R5 + SLD $4, R5 + ADD R5, R9 + LMG 0(R9), R5, R6 + + // Restore LE stack. + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) + + // Fill in parameter list. + MOVD a4+32(FP), R12 + MOVD R12, (2176+24)(R4) + MOVD a5+40(FP), R12 + MOVD R12, (2176+32)(R4) + MOVD a6+48(FP), R12 + MOVD R12, (2176+40)(R4) + + // Call function. + LE_CALL + NOPH + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. + + MOVD R3, r1+56(FP) + MOVD R0, r2+64(FP) + MOVD R0, err+72(FP) + MOVW R3, R4 + CMP R4, $-1 + BNE done + BL ·rrno<>(SB) + MOVWZ 0(R3), R3 + MOVD R3, err+72(FP) +done: + RET + +TEXT ·syscall_syscall9(SB),NOSPLIT,$0 + BL runtime·entersyscall(SB) + MOVD a1+8(FP), R1 + MOVD a2+16(FP), R2 + MOVD a3+24(FP), R3 + + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get function. + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + MOVD trap+0(FP), R5 + SLD $4, R5 + ADD R5, R9 + LMG 0(R9), R5, R6 + + // Restore LE stack. + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) + + // Fill in parameter list. + MOVD a4+32(FP), R12 + MOVD R12, (2176+24)(R4) + MOVD a5+40(FP), R12 + MOVD R12, (2176+32)(R4) + MOVD a6+48(FP), R12 + MOVD R12, (2176+40)(R4) + MOVD a7+56(FP), R12 + MOVD R12, (2176+48)(R4) + MOVD a8+64(FP), R12 + MOVD R12, (2176+56)(R4) + MOVD a9+72(FP), R12 + MOVD R12, (2176+64)(R4) + + // Call function. + LE_CALL + NOPH + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. + + MOVD R3, r1+80(FP) + MOVD R0, r2+88(FP) + MOVD R0, err+96(FP) + MOVW R3, R4 + CMP R4, $-1 + BNE done + BL addrerrno<>(SB) + MOVWZ 0(R3), R3 + MOVD R3, err+96(FP) +done: + BL runtime·exitsyscall(SB) + RET + +TEXT ·syscall_rawsyscall9(SB),NOSPLIT,$0 + MOVD a1+8(FP), R1 + MOVD a2+16(FP), R2 + MOVD a3+24(FP), R3 + + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get function. + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + MOVD trap+0(FP), R5 + SLD $4, R5 + ADD R5, R9 + LMG 0(R9), R5, R6 + + // Restore LE stack. + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) + + // Fill in parameter list. + MOVD a4+32(FP), R12 + MOVD R12, (2176+24)(R4) + MOVD a5+40(FP), R12 + MOVD R12, (2176+32)(R4) + MOVD a6+48(FP), R12 + MOVD R12, (2176+40)(R4) + MOVD a7+56(FP), R12 + MOVD R12, (2176+48)(R4) + MOVD a8+64(FP), R12 + MOVD R12, (2176+56)(R4) + MOVD a9+72(FP), R12 + MOVD R12, (2176+64)(R4) + + // Call function. + LE_CALL + NOPH + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. 
+ + MOVD R3, r1+80(FP) + MOVD R0, r2+88(FP) + MOVD R0, err+96(FP) + MOVW R3, R4 + CMP R4, $-1 + BNE done + BL addrerrno<>(SB) + MOVWZ 0(R3), R3 + MOVD R3, err+96(FP) +done: + RET + +// func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64) +TEXT ·svcCall(SB),NOSPLIT,$0 + BL runtime·save_g(SB) // Save g and stack pointer + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD R15, 0(R9) + + MOVD argv+8(FP), R1 // Move function arguments into registers + MOVD dsa+16(FP), g + MOVD fnptr+0(FP), R15 + + BYTE $0x0D // Branch to function + BYTE $0xEF + + BL runtime·load_g(SB) // Restore g and stack pointer + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R15 + + RET + +// func svcLoad(name *byte) unsafe.Pointer +TEXT ·svcLoad(SB),NOSPLIT,$0 + MOVD R15, R2 // Save go stack pointer + MOVD name+0(FP), R0 // Move SVC args into registers + MOVD $0x80000000, R1 + MOVD $0, R15 + BYTE $0x0A // SVC 08 LOAD + BYTE $0x08 + MOVW R15, R3 // Save return code from SVC + MOVD R2, R15 // Restore go stack pointer + CMP R3, $0 // Check SVC return code + BNE error + + MOVD $-2, R3 // Reset last bit of entry point to zero + AND R0, R3 + MOVD R3, addr+8(FP) // Return entry point returned by SVC + CMP R0, R3 // Check if last bit of entry point was set + BNE done + + MOVD R15, R2 // Save go stack pointer + MOVD $0, R15 // Move SVC args into registers (entry point still in r0 from SVC 08) + BYTE $0x0A // SVC 09 DELETE + BYTE $0x09 + MOVD R2, R15 // Restore go stack pointer + +error: + MOVD $0, addr+8(FP) // Return 0 on failure +done: + XOR R0, R0 // Reset r0 to 0 + RET + +// func svcUnload(name *byte, fnptr unsafe.Pointer) int64 +TEXT ·svcUnload(SB),NOSPLIT,$0 + MOVD R15, R2 // Save go stack pointer + MOVD name+0(FP), R0 // Move SVC args into registers + MOVD addr+8(FP), R15 + BYTE $0x0A // SVC 09 + BYTE $0x09 + XOR R0, R0 // Reset r0 to 0 + MOVD R15, R1 // Save SVC return code + MOVD R2, R15 // Restore go stack pointer + MOVD R1, rc+0(FP) // Return SVC return code + RET + +// func gettid() uint64 +TEXT ·gettid(SB), NOSPLIT, $0 + // Get library control area (LCA). + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + + // Get CEECAATHDID + MOVD CAA(R8), R9 + MOVD 0x3D0(R9), R9 + MOVD R9, ret+0(FP) + + RET diff --git a/vendor/golang.org/x/sys/unix/cap_freebsd.go b/vendor/golang.org/x/sys/unix/cap_freebsd.go index df520487737..0b7c6adb866 100644 --- a/vendor/golang.org/x/sys/unix/cap_freebsd.go +++ b/vendor/golang.org/x/sys/unix/cap_freebsd.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build freebsd // +build freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/constants.go b/vendor/golang.org/x/sys/unix/constants.go index 3a6ac648dd5..394a3965b68 100644 --- a/vendor/golang.org/x/sys/unix/constants.go +++ b/vendor/golang.org/x/sys/unix/constants.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go index 5e5fb451044..65a998508db 100644 --- a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix -// +build ppc +//go:build aix && ppc +// +build aix,ppc // Functions to access/create device major and minor numbers matching the // encoding used by AIX. diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go index 8b401244c41..8fc08ad0aae 100644 --- a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix -// +build ppc64 +//go:build aix && ppc64 +// +build aix,ppc64 // Functions to access/create device major and minor numbers matching the // encoding used AIX. diff --git a/vendor/golang.org/x/sys/unix/dev_zos.go b/vendor/golang.org/x/sys/unix/dev_zos.go new file mode 100644 index 00000000000..a388e59a0e0 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/dev_zos.go @@ -0,0 +1,29 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x +// +build zos,s390x + +// Functions to access/create device major and minor numbers matching the +// encoding used by z/OS. +// +// The information below is extracted and adapted from macros. + +package unix + +// Major returns the major component of a z/OS device number. +func Major(dev uint64) uint32 { + return uint32((dev >> 16) & 0x0000FFFF) +} + +// Minor returns the minor component of a z/OS device number. +func Minor(dev uint64) uint32 { + return uint32(dev & 0x0000FFFF) +} + +// Mkdev returns a z/OS device number generated from the given major and minor +// components. +func Mkdev(major, minor uint32) uint64 { + return (uint64(major) << 16) | uint64(minor) +} diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go index 304016b6886..e74e5eaa3bf 100644 --- a/vendor/golang.org/x/sys/unix/dirent.go +++ b/vendor/golang.org/x/sys/unix/dirent.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/endian_big.go b/vendor/golang.org/x/sys/unix/endian_big.go index 86781eac221..a5202655768 100644 --- a/vendor/golang.org/x/sys/unix/endian_big.go +++ b/vendor/golang.org/x/sys/unix/endian_big.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
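The new dev_zos.go above packs a z/OS device number as a 16-bit major / 16-bit minor pair. A minimal illustrative sketch (not part of the vendored diff) of round-tripping a device number through these helpers; it only builds with GOOS=zos GOARCH=s390x, and the major/minor values are arbitrary:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Mkdev places the major number in the upper 16 bits and the minor
	// number in the lower 16 bits, as in the dev_zos.go hunk above.
	dev := unix.Mkdev(64, 5)

	// Major and Minor recover the two halves again.
	fmt.Println(unix.Major(dev)) // 64
	fmt.Println(unix.Minor(dev)) // 5
}
```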
// +//go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64 // +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go index 8822d8541f2..4362f47e2c0 100644 --- a/vendor/golang.org/x/sys/unix/endian_little.go +++ b/vendor/golang.org/x/sys/unix/endian_little.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh // +build 386 amd64 amd64p32 alpha arm arm64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh package unix diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go index 84178b0a134..29ccc4d1334 100644 --- a/vendor/golang.org/x/sys/unix/env_unix.go +++ b/vendor/golang.org/x/sys/unix/env_unix.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Unix environment variables. diff --git a/vendor/golang.org/x/sys/unix/epoll_zos.go b/vendor/golang.org/x/sys/unix/epoll_zos.go new file mode 100644 index 00000000000..cedaf7e024b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/epoll_zos.go @@ -0,0 +1,221 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x +// +build zos,s390x + +package unix + +import ( + "sync" +) + +// This file simulates epoll on z/OS using poll. + +// Analogous to epoll_event on Linux. +// TODO(neeilan): Pad is because the Linux kernel expects a 96-bit struct. We never pass this to the kernel; remove? +type EpollEvent struct { + Events uint32 + Fd int32 + Pad int32 +} + +const ( + EPOLLERR = 0x8 + EPOLLHUP = 0x10 + EPOLLIN = 0x1 + EPOLLMSG = 0x400 + EPOLLOUT = 0x4 + EPOLLPRI = 0x2 + EPOLLRDBAND = 0x80 + EPOLLRDNORM = 0x40 + EPOLLWRBAND = 0x200 + EPOLLWRNORM = 0x100 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + // The following constants are part of the epoll API, but represent + // currently unsupported functionality on z/OS. + // EPOLL_CLOEXEC = 0x80000 + // EPOLLET = 0x80000000 + // EPOLLONESHOT = 0x40000000 + // EPOLLRDHUP = 0x2000 // Typically used with edge-triggered notis + // EPOLLEXCLUSIVE = 0x10000000 // Exclusive wake-up mode + // EPOLLWAKEUP = 0x20000000 // Relies on Linux's BLOCK_SUSPEND capability +) + +// TODO(neeilan): We can eliminate these epToPoll / pToEpoll calls by using identical mask values for POLL/EPOLL +// constants where possible The lower 16 bits of epoll events (uint32) can fit any system poll event (int16). + +// epToPollEvt converts epoll event field to poll equivalent. +// In epoll, Events is a 32-bit field, while poll uses 16 bits. 
+func epToPollEvt(events uint32) int16 { + var ep2p = map[uint32]int16{ + EPOLLIN: POLLIN, + EPOLLOUT: POLLOUT, + EPOLLHUP: POLLHUP, + EPOLLPRI: POLLPRI, + EPOLLERR: POLLERR, + } + + var pollEvts int16 = 0 + for epEvt, pEvt := range ep2p { + if (events & epEvt) != 0 { + pollEvts |= pEvt + } + } + + return pollEvts +} + +// pToEpollEvt converts 16 bit poll event bitfields to 32-bit epoll event fields. +func pToEpollEvt(revents int16) uint32 { + var p2ep = map[int16]uint32{ + POLLIN: EPOLLIN, + POLLOUT: EPOLLOUT, + POLLHUP: EPOLLHUP, + POLLPRI: EPOLLPRI, + POLLERR: EPOLLERR, + } + + var epollEvts uint32 = 0 + for pEvt, epEvt := range p2ep { + if (revents & pEvt) != 0 { + epollEvts |= epEvt + } + } + + return epollEvts +} + +// Per-process epoll implementation. +type epollImpl struct { + mu sync.Mutex + epfd2ep map[int]*eventPoll + nextEpfd int +} + +// eventPoll holds a set of file descriptors being watched by the process. A process can have multiple epoll instances. +// On Linux, this is an in-kernel data structure accessed through a fd. +type eventPoll struct { + mu sync.Mutex + fds map[int]*EpollEvent +} + +// epoll impl for this process. +var impl epollImpl = epollImpl{ + epfd2ep: make(map[int]*eventPoll), + nextEpfd: 0, +} + +func (e *epollImpl) epollcreate(size int) (epfd int, err error) { + e.mu.Lock() + defer e.mu.Unlock() + epfd = e.nextEpfd + e.nextEpfd++ + + e.epfd2ep[epfd] = &eventPoll{ + fds: make(map[int]*EpollEvent), + } + return epfd, nil +} + +func (e *epollImpl) epollcreate1(flag int) (fd int, err error) { + return e.epollcreate(4) +} + +func (e *epollImpl) epollctl(epfd int, op int, fd int, event *EpollEvent) (err error) { + e.mu.Lock() + defer e.mu.Unlock() + + ep, ok := e.epfd2ep[epfd] + if !ok { + + return EBADF + } + + switch op { + case EPOLL_CTL_ADD: + // TODO(neeilan): When we make epfds and fds disjoint, detect epoll + // loops here (instances watching each other) and return ELOOP. 
+ if _, ok := ep.fds[fd]; ok { + return EEXIST + } + ep.fds[fd] = event + case EPOLL_CTL_MOD: + if _, ok := ep.fds[fd]; !ok { + return ENOENT + } + ep.fds[fd] = event + case EPOLL_CTL_DEL: + if _, ok := ep.fds[fd]; !ok { + return ENOENT + } + delete(ep.fds, fd) + + } + return nil +} + +// Must be called while holding ep.mu +func (ep *eventPoll) getFds() []int { + fds := make([]int, len(ep.fds)) + for fd := range ep.fds { + fds = append(fds, fd) + } + return fds +} + +func (e *epollImpl) epollwait(epfd int, events []EpollEvent, msec int) (n int, err error) { + e.mu.Lock() // in [rare] case of concurrent epollcreate + epollwait + ep, ok := e.epfd2ep[epfd] + + if !ok { + e.mu.Unlock() + return 0, EBADF + } + + pollfds := make([]PollFd, 4) + for fd, epollevt := range ep.fds { + pollfds = append(pollfds, PollFd{Fd: int32(fd), Events: epToPollEvt(epollevt.Events)}) + } + e.mu.Unlock() + + n, err = Poll(pollfds, msec) + if err != nil { + return n, err + } + + i := 0 + for _, pFd := range pollfds { + if pFd.Revents != 0 { + events[i] = EpollEvent{Fd: pFd.Fd, Events: pToEpollEvt(pFd.Revents)} + i++ + } + + if i == n { + break + } + } + + return n, nil +} + +func EpollCreate(size int) (fd int, err error) { + return impl.epollcreate(size) +} + +func EpollCreate1(flag int) (fd int, err error) { + return impl.epollcreate1(flag) +} + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + return impl.epollctl(epfd, op, fd, event) +} + +// Because EpollWait mutates events, the caller is expected to coordinate +// concurrent access if calling with the same epfd from multiple goroutines. +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + return impl.epollwait(epfd, events, msec) +} diff --git a/vendor/golang.org/x/sys/unix/fcntl.go b/vendor/golang.org/x/sys/unix/fcntl.go index 4dc53486436..e9b991258c1 100644 --- a/vendor/golang.org/x/sys/unix/fcntl.go +++ b/vendor/golang.org/x/sys/unix/fcntl.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build dragonfly || freebsd || linux || netbsd || openbsd // +build dragonfly freebsd linux netbsd openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go index 8db48e5e062..cb0dfbd09a0 100644 --- a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go +++ b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (linux && 386) || (linux && arm) || (linux && mips) || (linux && mipsle) // +build linux,386 linux,arm linux,mips linux,mipsle package unix diff --git a/vendor/golang.org/x/sys/unix/fdset.go b/vendor/golang.org/x/sys/unix/fdset.go index b27be0a014c..b1e07b22023 100644 --- a/vendor/golang.org/x/sys/unix/fdset.go +++ b/vendor/golang.org/x/sys/unix/fdset.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/fstatfs_zos.go b/vendor/golang.org/x/sys/unix/fstatfs_zos.go new file mode 100644 index 00000000000..e377cc9f49c --- /dev/null +++ b/vendor/golang.org/x/sys/unix/fstatfs_zos.go @@ -0,0 +1,164 @@ +// Copyright 2020 The Go Authors. All rights reserved. 
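epoll_zos.go above emulates a level-triggered subset of epoll on top of Poll, keyed by a per-process table of watched descriptors. A hedged sketch of how the exported shim might be driven for a single descriptor; the fd argument and timeout are caller-supplied, and this only applies on zos/s390x where the shim is built:

```go
package epollshim

import "golang.org/x/sys/unix"

// waitReadable reports whether fd becomes readable within timeoutMs,
// driving the poll-backed EpollCreate1/EpollCtl/EpollWait shim above.
func waitReadable(fd int, timeoutMs int) (bool, error) {
	epfd, err := unix.EpollCreate1(0)
	if err != nil {
		return false, err
	}

	ev := unix.EpollEvent{Events: unix.EPOLLIN, Fd: int32(fd)}
	if err := unix.EpollCtl(epfd, unix.EPOLL_CTL_ADD, fd, &ev); err != nil {
		return false, err
	}

	events := make([]unix.EpollEvent, 8)
	n, err := unix.EpollWait(epfd, events, timeoutMs)
	if err != nil {
		return false, err
	}
	for _, e := range events[:n] {
		if e.Fd == int32(fd) && e.Events&unix.EPOLLIN != 0 {
			return true, nil
		}
	}
	return false, nil
}
```

Note that the shim's "epfd" is an index into an in-process map rather than a kernel file descriptor, so it is not passed to Close.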
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x +// +build zos,s390x + +package unix + +import ( + "unsafe" +) + +// This file simulates fstatfs on z/OS using fstatvfs and w_getmntent. + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + var stat_v Statvfs_t + err = Fstatvfs(fd, &stat_v) + if err == nil { + // populate stat + stat.Type = 0 + stat.Bsize = stat_v.Bsize + stat.Blocks = stat_v.Blocks + stat.Bfree = stat_v.Bfree + stat.Bavail = stat_v.Bavail + stat.Files = stat_v.Files + stat.Ffree = stat_v.Ffree + stat.Fsid = stat_v.Fsid + stat.Namelen = stat_v.Namemax + stat.Frsize = stat_v.Frsize + stat.Flags = stat_v.Flag + for passn := 0; passn < 5; passn++ { + switch passn { + case 0: + err = tryGetmntent64(stat) + break + case 1: + err = tryGetmntent128(stat) + break + case 2: + err = tryGetmntent256(stat) + break + case 3: + err = tryGetmntent512(stat) + break + case 4: + err = tryGetmntent1024(stat) + break + default: + break + } + //proceed to return if: err is nil (found), err is nonnil but not ERANGE (another error occurred) + if err == nil || err != nil && err != ERANGE { + break + } + } + } + return err +} + +func tryGetmntent64(stat *Statfs_t) (err error) { + var mnt_ent_buffer struct { + header W_Mnth + filesys_info [64]W_Mntent + } + var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) + fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) + if err != nil { + return err + } + err = ERANGE //return ERANGE if no match is found in this batch + for i := 0; i < fs_count; i++ { + if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { + stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) + err = nil + break + } + } + return err +} + +func tryGetmntent128(stat *Statfs_t) (err error) { + var mnt_ent_buffer struct { + header W_Mnth + filesys_info [128]W_Mntent + } + var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) + fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) + if err != nil { + return err + } + err = ERANGE //return ERANGE if no match is found in this batch + for i := 0; i < fs_count; i++ { + if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { + stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) + err = nil + break + } + } + return err +} + +func tryGetmntent256(stat *Statfs_t) (err error) { + var mnt_ent_buffer struct { + header W_Mnth + filesys_info [256]W_Mntent + } + var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) + fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) + if err != nil { + return err + } + err = ERANGE //return ERANGE if no match is found in this batch + for i := 0; i < fs_count; i++ { + if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { + stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) + err = nil + break + } + } + return err +} + +func tryGetmntent512(stat *Statfs_t) (err error) { + var mnt_ent_buffer struct { + header W_Mnth + filesys_info [512]W_Mntent + } + var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) + fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) + if err != nil { + return err + } + err = ERANGE //return ERANGE if no match is found in this batch + for i := 0; i < fs_count; i++ { + if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { + stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) + err = nil + break + } + } + return err +} 
+ +func tryGetmntent1024(stat *Statfs_t) (err error) { + var mnt_ent_buffer struct { + header W_Mnth + filesys_info [1024]W_Mntent + } + var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) + fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) + if err != nil { + return err + } + err = ERANGE //return ERANGE if no match is found in this batch + for i := 0; i < fs_count; i++ { + if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { + stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) + err = nil + break + } + } + return err +} diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go index 86032c11ef3..0dee23222ca 100644 --- a/vendor/golang.org/x/sys/unix/gccgo.go +++ b/vendor/golang.org/x/sys/unix/gccgo.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build gccgo -// +build !aix +//go:build gccgo && !aix +// +build gccgo,!aix package unix diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go index 251a977a811..e60e49a3d9c 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gccgo && linux && amd64 // +build gccgo,linux,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl.go b/vendor/golang.org/x/sys/unix/ioctl.go index 5641678613c..6c7ad052e6b 100644 --- a/vendor/golang.org/x/sys/unix/ioctl.go +++ b/vendor/golang.org/x/sys/unix/ioctl.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_zos.go b/vendor/golang.org/x/sys/unix/ioctl_zos.go new file mode 100644 index 00000000000..5384e7d91d7 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ioctl_zos.go @@ -0,0 +1,74 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x +// +build zos,s390x + +package unix + +import ( + "runtime" + "unsafe" +) + +// ioctl itself should not be exposed directly, but additional get/set +// functions for specific types are permissible. + +// IoctlSetInt performs an ioctl operation which sets an integer value +// on fd, using the specified request number. +func IoctlSetInt(fd int, req uint, value int) error { + return ioctl(fd, req, uintptr(value)) +} + +// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. +// +// To change fd's window size, the req argument should be TIOCSWINSZ. +func IoctlSetWinsize(fd int, req uint, value *Winsize) error { + // TODO: if we get the chance, remove the req parameter and + // hardcode TIOCSWINSZ. + err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) + runtime.KeepAlive(value) + return err +} + +// IoctlSetTermios performs an ioctl on fd with a *Termios. 
+// +// The req value is expected to be TCSETS, TCSETSW, or TCSETSF +func IoctlSetTermios(fd int, req uint, value *Termios) error { + if (req != TCSETS) && (req != TCSETSW) && (req != TCSETSF) { + return ENOSYS + } + err := Tcsetattr(fd, int(req), value) + runtime.KeepAlive(value) + return err +} + +// IoctlGetInt performs an ioctl operation which gets an integer value +// from fd, using the specified request number. +// +// A few ioctl requests use the return value as an output parameter; +// for those, IoctlRetInt should be used instead of this function. +func IoctlGetInt(fd int, req uint) (int, error) { + var value int + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return value, err +} + +func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { + var value Winsize + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +// IoctlGetTermios performs an ioctl on fd with a *Termios. +// +// The req value is expected to be TCGETS +func IoctlGetTermios(fd int, req uint) (*Termios, error) { + var value Termios + if req != TCGETS { + return &value, ENOSYS + } + err := Tcgetattr(fd, &value) + return &value, err +} diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index d257fac5057..d727cad19c1 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -199,7 +199,7 @@ illumos_amd64) mksyscall="go run mksyscall_solaris.go" mkerrors= mksysnum= - mktypes= + mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; *) echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2 diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index ca98cb485ff..007358af8fc 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -56,6 +56,7 @@ includes_Darwin=' #define _DARWIN_C_SOURCE #define KERNEL #define _DARWIN_USE_64_BIT_INODE +#define __APPLE_USE_RFC_3542 #include #include #include @@ -115,6 +116,7 @@ includes_FreeBSD=' #include #include #include +#include #include #include #include @@ -214,6 +216,8 @@ struct ltchars { #include #include #include +#include +#include #include #include #include @@ -224,6 +228,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -300,6 +305,17 @@ struct ltchars { // Including linux/l2tp.h here causes conflicts between linux/in.h // and netinet/in.h included via net/route.h above. #define IPPROTO_L2TP 115 + +// Copied from linux/hid.h. +// Keep in sync with the size of the referenced fields. 
+#define _HIDIOCGRAWNAME_LEN 128 // sizeof_field(struct hid_device, name) +#define _HIDIOCGRAWPHYS_LEN 64 // sizeof_field(struct hid_device, phys) +#define _HIDIOCGRAWUNIQ_LEN 64 // sizeof_field(struct hid_device, uniq) + +#define _HIDIOCGRAWNAME HIDIOCGRAWNAME(_HIDIOCGRAWNAME_LEN) +#define _HIDIOCGRAWPHYS HIDIOCGRAWPHYS(_HIDIOCGRAWPHYS_LEN) +#define _HIDIOCGRAWUNIQ HIDIOCGRAWUNIQ(_HIDIOCGRAWUNIQ_LEN) + ' includes_NetBSD=' @@ -389,10 +405,11 @@ includes_SunOS=' #include #include #include +#include #include -#include #include #include +#include ' @@ -447,6 +464,8 @@ ccflags="$@" $2 !~ /^EPROC_/ && $2 !~ /^EQUIV_/ && $2 !~ /^EXPR_/ && + $2 !~ /^EVIOC/ && + $2 !~ /^EV_/ && $2 ~ /^E[A-Z0-9_]+$/ || $2 ~ /^B[0-9_]+$/ || $2 ~ /^(OLD|NEW)DEV$/ || @@ -481,10 +500,10 @@ ccflags="$@" $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || $2 ~ /^LO_(KEY|NAME)_SIZE$/ || $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || - $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|MCAST|EVFILT|NOTE|EV|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL)_/ || + $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL)_/ || $2 ~ /^TP_STATUS_/ || $2 ~ /^FALLOC_/ || - $2 == "ICMPV6_FILTER" || + $2 ~ /^ICMPV?6?_(FILTER|SEC)/ || $2 == "SOMAXCONN" || $2 == "NAME_MAX" || $2 == "IFNAMSIZ" || @@ -571,6 +590,9 @@ ccflags="$@" $2 ~ /^W[A-Z0-9]+$/ || $2 ~/^PPPIOC/ || $2 ~ /^FAN_|FANOTIFY_/ || + $2 == "HID_MAX_DESCRIPTOR_SIZE" || + $2 ~ /^_?HIDIOC/ || + $2 ~ /^BUS_(USB|HIL|BLUETOOTH|VIRTUAL)$/ || $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} $2 ~ /^__WCOREFLAG$/ {next} $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)} @@ -608,6 +630,7 @@ echo '#include ' | $CC -x c - -E -dM $ccflags | echo '// mkerrors.sh' "$@" echo '// Code generated by the command above; see README.md. DO NOT EDIT.' echo +echo "//go:build ${GOARCH} && ${GOOS}" echo "// +build ${GOARCH},${GOOS}" echo go tool cgo -godefs -- "$@" _const.go >_error.out diff --git a/vendor/golang.org/x/sys/unix/pagesize_unix.go b/vendor/golang.org/x/sys/unix/pagesize_unix.go index bc2f3629a7a..53f1b4c5b81 100644 --- a/vendor/golang.org/x/sys/unix/pagesize_unix.go +++ b/vendor/golang.org/x/sys/unix/pagesize_unix.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris // For Unix, get the pagesize from the runtime. diff --git a/vendor/golang.org/x/sys/unix/ptrace_darwin.go b/vendor/golang.org/x/sys/unix/ptrace_darwin.go index fc568b5403e..463c3eff7fd 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_darwin.go +++ b/vendor/golang.org/x/sys/unix/ptrace_darwin.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build darwin && !ios // +build darwin,!ios package unix diff --git a/vendor/golang.org/x/sys/unix/ptrace_ios.go b/vendor/golang.org/x/sys/unix/ptrace_ios.go index 183441c9a53..ed0509a0117 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_ios.go +++ b/vendor/golang.org/x/sys/unix/ptrace_ios.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build ios // +build ios package unix diff --git a/vendor/golang.org/x/sys/unix/race.go b/vendor/golang.org/x/sys/unix/race.go index 61712b51c96..6f6c5fec5ae 100644 --- a/vendor/golang.org/x/sys/unix/race.go +++ b/vendor/golang.org/x/sys/unix/race.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (darwin && race) || (linux && race) || (freebsd && race) // +build darwin,race linux,race freebsd,race package unix diff --git a/vendor/golang.org/x/sys/unix/race0.go b/vendor/golang.org/x/sys/unix/race0.go index ad026678c7c..706e1322ae4 100644 --- a/vendor/golang.org/x/sys/unix/race0.go +++ b/vendor/golang.org/x/sys/unix/race0.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix darwin,!race linux,!race freebsd,!race netbsd openbsd solaris dragonfly +//go:build aix || (darwin && !race) || (linux && !race) || (freebsd && !race) || netbsd || openbsd || solaris || dragonfly || zos +// +build aix darwin,!race linux,!race freebsd,!race netbsd openbsd solaris dragonfly zos package unix diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdents.go b/vendor/golang.org/x/sys/unix/readdirent_getdents.go index 3a90aa6dfae..4d6257569ea 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdents.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdents.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd // +build aix dragonfly freebsd linux netbsd openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go index 5fdae40b3a8..2a4ba47c45b 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build darwin // +build darwin package unix diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go index 003916ed7a0..453a942c5db 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Socket control messages diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go index 57a0021da55..0840fe4a574 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build aix darwin freebsd linux netbsd openbsd solaris +//go:build aix || darwin || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin freebsd linux netbsd openbsd solaris zos package unix @@ -36,6 +37,10 @@ func cmsgAlignOf(salen int) int { if runtime.GOOS == "netbsd" && runtime.GOARCH == "arm64" { salign = 16 } + case "zos": + // z/OS socket macros use [32-bit] sizeof(int) alignment, + // not pointer width. + salign = SizeofInt } return (salen + salign - 1) & ^(salign - 1) diff --git a/vendor/golang.org/x/sys/unix/str.go b/vendor/golang.org/x/sys/unix/str.go index 17fb6986831..8ba89ed8694 100644 --- a/vendor/golang.org/x/sys/unix/str.go +++ b/vendor/golang.org/x/sys/unix/str.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go index ab75ef9cc62..649fa87405d 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Package unix contains an interface to the low-level operating system // primitives. OS details vary depending on the underlying system, and diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 423dcced7ef..d8efb715ff1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix // +build aix // Aix system calls. @@ -251,7 +252,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { } } - bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] + bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] sa.Name = string(bytes) return sa, nil diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go index b3c8e3301ce..e92a0be1630 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix -// +build ppc +//go:build aix && ppc +// +build aix,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go index 9a6e024179d..16eed17098e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
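The sockcmsg_unix_other.go hunk above notes that z/OS aligns control-message lengths to sizeof(int) (SizeofInt, i.e. 4) rather than to pointer width. A small self-contained sketch of the same rounding formula, with the constant 4 standing in for the SizeofInt the real code uses:

```go
package main

import "fmt"

// alignCmsgLen mirrors the (salen + salign - 1) &^ (salign - 1) rounding in
// cmsgAlignOf above, with salign fixed at 4 as on z/OS.
func alignCmsgLen(salen int) int {
	const salign = 4
	return (salen + salign - 1) &^ (salign - 1)
}

func main() {
	fmt.Println(alignCmsgLen(5)) // 8
	fmt.Println(alignCmsgLen(8)) // 8
	fmt.Println(alignCmsgLen(9)) // 12
}
```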
-// +build aix -// +build ppc64 +//go:build aix && ppc64 +// +build aix,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 678fb27777a..95ac3946b5c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build darwin || dragonfly || freebsd || netbsd || openbsd // +build darwin dragonfly freebsd netbsd openbsd // BSD system call wrappers shared by *BSD based systems diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go index b31ef035881..b0098607c70 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build darwin && go1.12 && !go1.13 // +build darwin,go1.12,!go1.13 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go index ee852f1abc5..5fc3cda6fc8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build darwin && go1.13 // +build darwin,go1.13 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index e771a3c5efb..1223d7aed1c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -382,7 +382,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e // The usual level and opt are SOL_LOCAL and LOCAL_PEERCRED, respectively. func GetsockoptXucred(fd, level, opt int) (*Xucred, error) { x := new(Xucred) - vallen := _Socklen(unsafe.Sizeof(Xucred{})) + vallen := _Socklen(SizeofXucred) err := getsockopt(fd, level, opt, unsafe.Pointer(x), &vallen) return x, err } diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go index ee065fcf2da..64746771226 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build 386 && darwin // +build 386,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index 7a1f64a7b6b..b37310ce9b4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build amd64 && darwin // +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index 9f85fd4046e..d51ec996304 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build arm64 && darwin // +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index f34c86c899a..38bec300262 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build darwin && go1.12 // +build darwin,go1.12 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 7629878bda2..5af108a5038 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -105,16 +105,16 @@ func Pipe(p []int) (err error) { return } -//sysnb pipe2(p *[2]_C_int, flags int) (err error) +//sysnb pipe2(p *[2]_C_int, flags int) (r int, w int, err error) -func Pipe2(p []int, flags int) error { +func Pipe2(p []int, flags int) (err error) { if len(p) != 2 { return EINVAL } var pp [2]_C_int - err := pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) + // pipe2 on dragonfly takes an fds array as an argument, but still + // returns the file descriptors. + p[0], p[1], err = pipe2(&pp, flags) return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go index a6b4830ac8a..4e2d32120a8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build amd64 && dragonfly // +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 15af63dd552..18c392cf369 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -126,6 +126,15 @@ func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) { return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq)) } +// GetsockoptXucred is a getsockopt wrapper that returns an Xucred struct. +// The usual level and opt are SOL_LOCAL and LOCAL_PEERCRED, respectively. +func GetsockoptXucred(fd, level, opt int) (*Xucred, error) { + x := new(Xucred) + vallen := _Socklen(SizeofXucred) + err := getsockopt(fd, level, opt, unsafe.Pointer(x), &vallen) + return x, err +} + func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) { var rsa RawSockaddrAny var len _Socklen = SizeofSockaddrAny diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index 72a506ddcb5..342fc32b168 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
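GetsockoptXucred, added for FreeBSD above (with the Darwin copy switched to SizeofXucred for the option length), returns the credentials of the peer on a connected Unix-domain socket. A hedged usage sketch: SOL_LOCAL and LOCAL_PEERCRED are the level and option the doc comment suggests; they are generated for Darwin and are assumed to be available on the target platform here.

```go
package peercred

import "golang.org/x/sys/unix"

// peerUID returns the effective user ID of the peer connected on a
// Unix-domain socket descriptor, via the Xucred getter added above.
func peerUID(fd int) (uint32, error) {
	cred, err := unix.GetsockoptXucred(fd, unix.SOL_LOCAL, unix.LOCAL_PEERCRED)
	if err != nil {
		return 0, err
	}
	return cred.Uid, nil
}
```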
+//go:build 386 && freebsd // +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index d5e376acaea..a32d5aa4aed 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build amd64 && freebsd // +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 4ea45bce52b..1e36d39abe0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build arm && freebsd // +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index aa5326db193..a09a1537bd6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build arm64 && freebsd // +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_illumos.go b/vendor/golang.org/x/sys/unix/syscall_illumos.go index 7a2d4120fc0..c5c58806ca8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_illumos.go +++ b/vendor/golang.org/x/sys/unix/syscall_illumos.go @@ -4,11 +4,14 @@ // illumos system calls not present on Solaris. +//go:build amd64 && illumos // +build amd64,illumos package unix -import "unsafe" +import ( + "unsafe" +) func bytes2iovec(bs [][]byte) []Iovec { iovecs := make([]Iovec, len(bs)) @@ -75,3 +78,52 @@ func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) { } return } + +//sys putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) + +func Putmsg(fd int, cl []byte, data []byte, flags int) (err error) { + var clp, datap *strbuf + if len(cl) > 0 { + clp = &strbuf{ + Len: int32(len(cl)), + Buf: (*int8)(unsafe.Pointer(&cl[0])), + } + } + if len(data) > 0 { + datap = &strbuf{ + Len: int32(len(data)), + Buf: (*int8)(unsafe.Pointer(&data[0])), + } + } + return putmsg(fd, clp, datap, flags) +} + +//sys getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error) + +func Getmsg(fd int, cl []byte, data []byte) (retCl []byte, retData []byte, flags int, err error) { + var clp, datap *strbuf + if len(cl) > 0 { + clp = &strbuf{ + Maxlen: int32(len(cl)), + Buf: (*int8)(unsafe.Pointer(&cl[0])), + } + } + if len(data) > 0 { + datap = &strbuf{ + Maxlen: int32(len(data)), + Buf: (*int8)(unsafe.Pointer(&data[0])), + } + } + + if err = getmsg(fd, clp, datap, &flags); err != nil { + return nil, nil, 0, err + } + + if len(cl) > 0 { + retCl = cl[:clp.Len] + } + if len(data) > 0 { + retData = data[:datap.Len] + } + return retCl, retData, flags, nil +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 1b210357000..c61c963b7f9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -106,6 +106,31 @@ func IoctlGetRTCTime(fd int) (*RTCTime, error) { return &value, err } +type ifreqEthtool struct { + name [IFNAMSIZ]byte + data 
unsafe.Pointer +} + +// IoctlGetEthtoolDrvinfo fetches ethtool driver information for the network +// device specified by ifname. +func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) { + // Leave room for terminating NULL byte. + if len(ifname) >= IFNAMSIZ { + return nil, EINVAL + } + + value := EthtoolDrvinfo{ + Cmd: ETHTOOL_GDRVINFO, + } + ifreq := ifreqEthtool{ + data: unsafe.Pointer(&value), + } + copy(ifreq.name[:], ifname) + err := ioctl(fd, SIOCETHTOOL, uintptr(unsafe.Pointer(&ifreq))) + runtime.KeepAlive(ifreq) + return &value, err +} + // IoctlGetWatchdogInfo fetches information about a watchdog device from the // Linux watchdog API. For more information, see: // https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. @@ -137,12 +162,61 @@ func IoctlFileClone(destFd, srcFd int) error { return ioctl(destFd, FICLONE, uintptr(srcFd)) } +type FileDedupeRange struct { + Src_offset uint64 + Src_length uint64 + Reserved1 uint16 + Reserved2 uint32 + Info []FileDedupeRangeInfo +} + +type FileDedupeRangeInfo struct { + Dest_fd int64 + Dest_offset uint64 + Bytes_deduped uint64 + Status int32 + Reserved uint32 +} + // IoctlFileDedupeRange performs an FIDEDUPERANGE ioctl operation to share the -// range of data conveyed in value with the file associated with the file -// descriptor destFd. See the ioctl_fideduperange(2) man page for details. -func IoctlFileDedupeRange(destFd int, value *FileDedupeRange) error { - err := ioctl(destFd, FIDEDUPERANGE, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) +// range of data conveyed in value from the file associated with the file +// descriptor srcFd to the value.Info destinations. See the +// ioctl_fideduperange(2) man page for details. +func IoctlFileDedupeRange(srcFd int, value *FileDedupeRange) error { + buf := make([]byte, SizeofRawFileDedupeRange+ + len(value.Info)*SizeofRawFileDedupeRangeInfo) + rawrange := (*RawFileDedupeRange)(unsafe.Pointer(&buf[0])) + rawrange.Src_offset = value.Src_offset + rawrange.Src_length = value.Src_length + rawrange.Dest_count = uint16(len(value.Info)) + rawrange.Reserved1 = value.Reserved1 + rawrange.Reserved2 = value.Reserved2 + + for i := range value.Info { + rawinfo := (*RawFileDedupeRangeInfo)(unsafe.Pointer( + uintptr(unsafe.Pointer(&buf[0])) + uintptr(SizeofRawFileDedupeRange) + + uintptr(i*SizeofRawFileDedupeRangeInfo))) + rawinfo.Dest_fd = value.Info[i].Dest_fd + rawinfo.Dest_offset = value.Info[i].Dest_offset + rawinfo.Bytes_deduped = value.Info[i].Bytes_deduped + rawinfo.Status = value.Info[i].Status + rawinfo.Reserved = value.Info[i].Reserved + } + + err := ioctl(srcFd, FIDEDUPERANGE, uintptr(unsafe.Pointer(&buf[0]))) + + // Output + for i := range value.Info { + rawinfo := (*RawFileDedupeRangeInfo)(unsafe.Pointer( + uintptr(unsafe.Pointer(&buf[0])) + uintptr(SizeofRawFileDedupeRange) + + uintptr(i*SizeofRawFileDedupeRangeInfo))) + value.Info[i].Dest_fd = rawinfo.Dest_fd + value.Info[i].Dest_offset = rawinfo.Dest_offset + value.Info[i].Bytes_deduped = rawinfo.Bytes_deduped + value.Info[i].Status = rawinfo.Status + value.Info[i].Reserved = rawinfo.Reserved + } + return err } @@ -153,6 +227,36 @@ func IoctlWatchdogKeepalive(fd int) error { return ioctl(fd, WDIOC_KEEPALIVE, 0) } +func IoctlHIDGetDesc(fd int, value *HIDRawReportDescriptor) error { + err := ioctl(fd, HIDIOCGRDESC, uintptr(unsafe.Pointer(value))) + runtime.KeepAlive(value) + return err +} + +func IoctlHIDGetRawInfo(fd int) (*HIDRawDevInfo, error) { + var value HIDRawDevInfo + err := ioctl(fd, 
HIDIOCGRAWINFO, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func IoctlHIDGetRawName(fd int) (string, error) { + var value [_HIDIOCGRAWNAME_LEN]byte + err := ioctl(fd, _HIDIOCGRAWNAME, uintptr(unsafe.Pointer(&value[0]))) + return ByteSliceToString(value[:]), err +} + +func IoctlHIDGetRawPhys(fd int) (string, error) { + var value [_HIDIOCGRAWPHYS_LEN]byte + err := ioctl(fd, _HIDIOCGRAWPHYS, uintptr(unsafe.Pointer(&value[0]))) + return ByteSliceToString(value[:]), err +} + +func IoctlHIDGetRawUniq(fd int) (string, error) { + var value [_HIDIOCGRAWUNIQ_LEN]byte + err := ioctl(fd, _HIDIOCGRAWUNIQ, uintptr(unsafe.Pointer(&value[0]))) + return ByteSliceToString(value[:]), err +} + //sys Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) func Link(oldpath string, newpath string) (err error) { @@ -778,16 +882,19 @@ type SockaddrVM struct { // CID and Port specify a context ID and port address for a VM socket. // Guests have a unique CID, and hosts may have a well-known CID of: // - VMADDR_CID_HYPERVISOR: refers to the hypervisor process. + // - VMADDR_CID_LOCAL: refers to local communication (loopback). // - VMADDR_CID_HOST: refers to other processes on the host. - CID uint32 - Port uint32 - raw RawSockaddrVM + CID uint32 + Port uint32 + Flags uint8 + raw RawSockaddrVM } func (sa *SockaddrVM) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Family = AF_VSOCK sa.raw.Port = sa.Port sa.raw.Cid = sa.CID + sa.raw.Flags = sa.Flags return unsafe.Pointer(&sa.raw), SizeofSockaddrVM, nil } @@ -1092,8 +1199,9 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { case AF_VSOCK: pp := (*RawSockaddrVM)(unsafe.Pointer(rsa)) sa := &SockaddrVM{ - CID: pp.Cid, - Port: pp.Port, + CID: pp.Cid, + Port: pp.Port, + Flags: pp.Flags, } return sa, nil case AF_BLUETOOTH: @@ -1798,6 +1906,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys ClockGettime(clockid int32, time *Timespec) (err error) //sys ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) //sys Close(fd int) (err error) +//sys CloseRange(first uint, last uint, flags uint) (err error) //sys CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys DeleteModule(name string, flags int) (err error) //sys Dup(oldfd int) (fd int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index 70e61bd5db4..7b52e5d8a40 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build 386 && linux // +build 386,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 4be2acd32bd..28b76411522 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
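IoctlGetEthtoolDrvinfo, added to syscall_linux.go above, issues SIOCETHTOOL with ETHTOOL_GDRVINFO through a private ifreq wrapper. A hedged sketch of calling it on Linux: "eth0" is a placeholder interface name, any datagram socket can carry the request, and the Driver/Version field names come from the generated EthtoolDrvinfo type rather than from this diff.

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Any datagram socket works as the carrier for the SIOCETHTOOL ioctl.
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	// "eth0" is a placeholder; substitute a real interface on the host.
	info, err := unix.IoctlGetEthtoolDrvinfo(fd, "eth0")
	if err != nil {
		panic(err)
	}

	// Driver and Version are NUL-padded byte arrays in the generated type.
	fmt.Println(unix.ByteSliceToString(info.Driver[:]), unix.ByteSliceToString(info.Version[:]))
}
```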
+//go:build amd64 && linux // +build amd64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go index baa771f8ad9..8b0f0f3aa56 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build amd64,linux -// +build gc +//go:build amd64 && linux && gc +// +build amd64,linux,gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index 322b8e0fb90..68877728ec6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build arm && linux // +build arm,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index 9315cf415b7..7ed7034761c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build arm64 && linux // +build arm64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go index 9edf3961b01..2b1168d7d19 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && gc // +build linux,gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go index 90e33d8cf75..9843fb48960 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && gc && 386 // +build linux,gc,386 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go index 1a97baae732..a6008fccd59 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build arm && gc && linux // +build arm,gc,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go index 308eb7aecfa..7740af2428b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
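// Editor's note (illustrative, not part of the upstream diff): the
// syscall_linux.go hunk earlier in this diff also adds the CloseRange stub
// (//sys CloseRange(first uint, last uint, flags uint)). A hedged usage
// sketch follows, assuming Linux 5.9+ where close_range(2) exists; older
// kernels return ENOSYS.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Close every descriptor from 3 up to the maximum, e.g. before exec'ing a
	// child process that must not inherit stray file descriptors.
	if err := unix.CloseRange(3, ^uint(0), 0); err != nil {
		fmt.Println("close_range:", err)
	}
}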
+//go:build linux && gccgo && 386 // +build linux,gccgo,386 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go index aa7fc9e1997..e16a12299ae 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && gccgo && arm // +build linux,gccgo,arm package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 5fc8a47f414..06dec06fa19 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && (mips64 || mips64le) // +build linux // +build mips64 mips64le diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index 20b9825f87b..8f0d0a5b592 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && (mips || mipsle) // +build linux // +build mips mipsle diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 5162c39b0e1..0b1f0d6da57 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && (ppc64 || ppc64le) // +build linux // +build ppc64 ppc64le diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index a6a66ece099..ce9bcd31717 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build riscv64 && linux // +build riscv64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index fcef0d1daf9..a1e45694b44 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
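// Editor's note (illustrative, not part of the upstream diff): the recurring
// change across these files is the addition of //go:build lines alongside the
// legacy // +build lines. The new form is a single boolean expression; in the
// old form, separate // +build lines AND together and space-separated terms on
// one line OR together, so the two blocks below are equivalent (the mips64x
// file above is a real instance of this pattern).

//go:build linux && (mips64 || mips64le)
// +build linux
// +build mips64 mips64le

package unix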
+//go:build s390x && linux // +build s390x,linux package unix @@ -249,7 +250,7 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen } func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) error { - args := [4]uintptr{uintptr(s), uintptr(level), uintptr(name), uintptr(val)} + args := [5]uintptr{uintptr(s), uintptr(level), uintptr(name), uintptr(val), vallen} _, _, err := Syscall(SYS_SOCKETCALL, netSetSockOpt, uintptr(unsafe.Pointer(&args)), 0) if err != 0 { return err diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index 3b880448301..49055a3cf51 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build sparc64 && linux // +build sparc64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 9b150709201..853d5f0f436 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -120,6 +120,19 @@ func Pipe(p []int) (err error) { return } +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe2(p []int, flags int) error { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err := pipe2(&pp, flags) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return err +} + //sys Getdents(fd int, buf []byte) (n int, err error) func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go index 24da8b52454..5199d282fd0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build 386 && netbsd // +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go index 25a0ac82589..70a9c52e980 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build amd64 && netbsd // +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go index 21591ecd4d1..3eb5942f93f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build arm && netbsd // +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go index 80474963500..fc6ccfd810d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
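// Editor's note (illustrative, not part of the upstream diff): the NetBSD hunk
// above adds Pipe2, wrapping pipe2(2) so both pipe ends can be created with
// flags such as O_CLOEXEC and O_NONBLOCK atomically. A hedged usage sketch,
// assuming a NetBSD build of golang.org/x/sys/unix.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fds := make([]int, 2)
	if err := unix.Pipe2(fds, unix.O_CLOEXEC|unix.O_NONBLOCK); err != nil {
		fmt.Println("pipe2:", err)
		return
	}
	defer unix.Close(fds[0]) // read end
	defer unix.Close(fds[1]) // write end

	if _, err := unix.Write(fds[1], []byte("hello")); err != nil {
		fmt.Println("write:", err)
	}
}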
+//go:build arm64 && netbsd // +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go index 42b5a0e51e8..6baabcdcb06 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build 386 && openbsd // +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go index 6ea4b48831b..bab25360eae 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build amd64 && openbsd // +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go index 1c3d26fa2c9..8eed3c4d4e7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build arm && openbsd // +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go index a8c458cb031..483dde99d4c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build arm64 && openbsd // +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 169497f0620..77fcde7c180 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -565,7 +565,12 @@ func Minor(dev uint64) uint32 { * Expose the ioctl function */ -//sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) = libc.ioctl + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, err = ioctlRet(fd, req, arg) + return err +} func IoctlSetTermio(fd int, req uint, value *Termio) error { err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go index b22a34d7ae9..0bd25ef81f2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build amd64 && solaris // +build amd64,solaris package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 400ba9fbc90..a7618ceb55e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go index 87bd161cefc..5898e9a52b7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go @@ -2,8 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && gc && !ppc64le && !ppc64 // +build darwin dragonfly freebsd linux netbsd openbsd solaris -// +build gc,!ppc64le,!ppc64 +// +build gc +// +build !ppc64le +// +build !ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go index d36216c3ca7..f6f707acf2c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux && (ppc64le || ppc64) && gc // +build linux // +build ppc64le ppc64 // +build gc diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go new file mode 100644 index 00000000000..13f58d2b2fb --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -0,0 +1,1781 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x +// +build zos,s390x + +package unix + +import ( + "bytes" + "runtime" + "sort" + "sync" + "syscall" + "unsafe" +) + +const ( + O_CLOEXEC = 0 // Dummy value (not supported). 
+ AF_LOCAL = AF_UNIX // AF_LOCAL is an alias for AF_UNIX +) + +func syscall_syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) +func syscall_rawsyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) +func syscall_syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) +func syscall_rawsyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) +func syscall_syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) +func syscall_rawsyscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) + +func copyStat(stat *Stat_t, statLE *Stat_LE_t) { + stat.Dev = uint64(statLE.Dev) + stat.Ino = uint64(statLE.Ino) + stat.Nlink = uint64(statLE.Nlink) + stat.Mode = uint32(statLE.Mode) + stat.Uid = uint32(statLE.Uid) + stat.Gid = uint32(statLE.Gid) + stat.Rdev = uint64(statLE.Rdev) + stat.Size = statLE.Size + stat.Atim.Sec = int64(statLE.Atim) + stat.Atim.Nsec = 0 //zos doesn't return nanoseconds + stat.Mtim.Sec = int64(statLE.Mtim) + stat.Mtim.Nsec = 0 //zos doesn't return nanoseconds + stat.Ctim.Sec = int64(statLE.Ctim) + stat.Ctim.Nsec = 0 //zos doesn't return nanoseconds + stat.Blksize = int64(statLE.Blksize) + stat.Blocks = statLE.Blocks +} + +func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64) +func svcLoad(name *byte) unsafe.Pointer +func svcUnload(name *byte, fnptr unsafe.Pointer) int64 + +func (d *Dirent) NameString() string { + if d == nil { + return "" + } + return string(d.Name[:d.Namlen]) +} + +func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, EINVAL + } + sa.raw.Len = SizeofSockaddrInet4 + sa.raw.Family = AF_INET + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil +} + +func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, EINVAL + } + sa.raw.Len = SizeofSockaddrInet6 + sa.raw.Family = AF_INET6 + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + sa.raw.Scope_id = sa.ZoneId + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil +} + +func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { + name := sa.Name + n := len(name) + if n >= len(sa.raw.Path) || n == 0 { + return nil, 0, EINVAL + } + sa.raw.Len = byte(3 + n) // 2 for Family, Len; 1 for NUL + sa.raw.Family = AF_UNIX + for i := 0; i < n; i++ { + sa.raw.Path[i] = int8(name[i]) + } + return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil +} + +func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { + // TODO(neeilan): Implement use of first param (fd) + switch rsa.Addr.Family { + case AF_UNIX: + pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa)) + sa := new(SockaddrUnix) + // For z/OS, only replace NUL with @ when the + // length is not zero. + if pp.Len != 0 && pp.Path[0] == 0 { + // "Abstract" Unix domain socket. + // Rewrite leading NUL as @ for textual display. + // (This is the standard convention.) + // Not friendly to overwrite in place, + // but the callers below don't care. + pp.Path[0] = '@' + } + + // Assume path ends at NUL. + // + // For z/OS, the length of the name is a field + // in the structure. 
To be on the safe side, we + // will still scan the name for a NUL but only + // to the length provided in the structure. + // + // This is not technically the Linux semantics for + // abstract Unix domain sockets--they are supposed + // to be uninterpreted fixed-size binary blobs--but + // everyone uses this convention. + n := 0 + for n < int(pp.Len) && pp.Path[n] != 0 { + n++ + } + bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] + sa.Name = string(bytes) + return sa, nil + + case AF_INET: + pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet4) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + + case AF_INET6: + pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet6) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + sa.ZoneId = pp.Scope_id + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + } + return nil, EAFNOSUPPORT +} + +func Accept(fd int) (nfd int, sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + nfd, err = accept(fd, &rsa, &len) + if err != nil { + return + } + // TODO(neeilan): Remove 0 in call + sa, err = anyToSockaddr(0, &rsa) + if err != nil { + Close(nfd) + nfd = 0 + } + return +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = int32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = int32(length) +} + +//sys fcntl(fd int, cmd int, arg int) (val int, err error) +//sys read(fd int, p []byte) (n int, err error) +//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ +//sys write(fd int, p []byte) (n int, err error) + +//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) = SYS___ACCEPT_A +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = SYS___BIND_A +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = SYS___CONNECT_A +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) +//sysnb setgroups(n int, list *_Gid_t) (err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sysnb socket(domain int, typ int, proto int) (fd int, err error) +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = SYS___GETPEERNAME_A +//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = SYS___GETSOCKNAME_A +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) = SYS___RECVFROM_A +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) = SYS___SENDTO_A +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = SYS___RECVMSG_A +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = SYS___SENDMSG_A +//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) = SYS_MMAP +//sys munmap(addr uintptr, length uintptr) (err error) = SYS_MUNMAP +//sys ioctl(fd int, req uint, arg uintptr) (err error) = SYS_IOCTL + +//sys Access(path string, mode uint32) (err error) = SYS___ACCESS_A +//sys Chdir(path string) (err 
error) = SYS___CHDIR_A +//sys Chown(path string, uid int, gid int) (err error) = SYS___CHOWN_A +//sys Chmod(path string, mode uint32) (err error) = SYS___CHMOD_A +//sys Creat(path string, mode uint32) (fd int, err error) = SYS___CREAT_A +//sys Dup(oldfd int) (fd int, err error) +//sys Dup2(oldfd int, newfd int) (err error) +//sys Exit(code int) +//sys Fchdir(fd int) (err error) +//sys Fchmod(fd int, mode uint32) (err error) +//sys Fchown(fd int, uid int, gid int) (err error) +//sys FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) = SYS_FCNTL +//sys fstat(fd int, stat *Stat_LE_t) (err error) + +func Fstat(fd int, stat *Stat_t) (err error) { + var statLE Stat_LE_t + err = fstat(fd, &statLE) + copyStat(stat, &statLE) + return +} + +//sys Fstatvfs(fd int, stat *Statvfs_t) (err error) = SYS_FSTATVFS +//sys Fsync(fd int) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sys Getpagesize() (pgsize int) = SYS_GETPAGESIZE +//sys Mprotect(b []byte, prot int) (err error) = SYS_MPROTECT +//sys Msync(b []byte, flags int) (err error) = SYS_MSYNC +//sys Poll(fds []PollFd, timeout int) (n int, err error) = SYS_POLL +//sys Times(tms *Tms) (ticks uintptr, err error) = SYS_TIMES +//sys W_Getmntent(buff *byte, size int) (lastsys int, err error) = SYS_W_GETMNTENT + +//sys Mount(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) = SYS___MOUNT_A +//sys Unmount(filesystem string, mtm int) (err error) = SYS___UMOUNT_A +//sys Chroot(path string) (err error) = SYS___CHROOT_A +//sysnb Uname(buf *Utsname) (err error) = SYS___UNAME_A + +func Ptsname(fd int) (name string, err error) { + r0, _, e1 := syscall_syscall(SYS___PTSNAME_A, uintptr(fd), 0, 0) + name = u2s(unsafe.Pointer(r0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func u2s(cstr unsafe.Pointer) string { + str := (*[1024]uint8)(cstr) + i := 0 + for str[i] != 0 { + i++ + } + return string(str[:i]) +} + +func Close(fd int) (err error) { + _, _, e1 := syscall_syscall(SYS_CLOSE, uintptr(fd), 0, 0) + for i := 0; e1 == EAGAIN && i < 10; i++ { + _, _, _ = syscall_syscall(SYS_USLEEP, uintptr(10), 0, 0) + _, _, e1 = syscall_syscall(SYS_CLOSE, uintptr(fd), 0, 0) + } + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var mapper = &mmapper{ + active: make(map[*byte][]byte), + mmap: mmap, + munmap: munmap, +} + +// Dummy function: there are no semantics for Madvise on z/OS +func Madvise(b []byte, advice int) (err error) { + return +} + +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} + +//sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A +//sysnb Getegid() (egid int) +//sysnb Geteuid() (uid int) +//sysnb Getgid() (gid int) +//sysnb Getpid() (pid int) +//sysnb Getpgid(pid int) (pgid int, err error) = SYS_GETPGID + +func Getpgrp() (pid int) { + pid, _ = Getpgid(0) + return +} + +//sysnb Getppid() (pid int) +//sys Getpriority(which int, who int) (prio int, err error) +//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) = SYS_GETRLIMIT + +//sysnb getrusage(who int, rusage *rusage_zos) (err error) = SYS_GETRUSAGE + +func Getrusage(who int, rusage *Rusage) (err error) { + var ruz rusage_zos + err = getrusage(who, &ruz) + //Only the first two fields of Rusage are set + rusage.Utime.Sec = ruz.Utime.Sec + rusage.Utime.Usec = int64(ruz.Utime.Usec) + rusage.Stime.Sec = ruz.Stime.Sec + rusage.Stime.Usec = 
int64(ruz.Stime.Usec) + return +} + +//sysnb Getsid(pid int) (sid int, err error) = SYS_GETSID +//sysnb Getuid() (uid int) +//sysnb Kill(pid int, sig Signal) (err error) +//sys Lchown(path string, uid int, gid int) (err error) = SYS___LCHOWN_A +//sys Link(path string, link string) (err error) = SYS___LINK_A +//sys Listen(s int, n int) (err error) +//sys lstat(path string, stat *Stat_LE_t) (err error) = SYS___LSTAT_A + +func Lstat(path string, stat *Stat_t) (err error) { + var statLE Stat_LE_t + err = lstat(path, &statLE) + copyStat(stat, &statLE) + return +} + +//sys Mkdir(path string, mode uint32) (err error) = SYS___MKDIR_A +//sys Mkfifo(path string, mode uint32) (err error) = SYS___MKFIFO_A +//sys Mknod(path string, mode uint32, dev int) (err error) = SYS___MKNOD_A +//sys Pread(fd int, p []byte, offset int64) (n int, err error) +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys Readlink(path string, buf []byte) (n int, err error) = SYS___READLINK_A +//sys Rename(from string, to string) (err error) = SYS___RENAME_A +//sys Rmdir(path string) (err error) = SYS___RMDIR_A +//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK +//sys Setpriority(which int, who int, prio int) (err error) +//sysnb Setpgid(pid int, pgid int) (err error) = SYS_SETPGID +//sysnb Setrlimit(resource int, lim *Rlimit) (err error) +//sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID +//sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID +//sysnb Setsid() (pid int, err error) = SYS_SETSID +//sys Setuid(uid int) (err error) = SYS_SETUID +//sys Setgid(uid int) (err error) = SYS_SETGID +//sys Shutdown(fd int, how int) (err error) +//sys stat(path string, statLE *Stat_LE_t) (err error) = SYS___STAT_A + +func Stat(path string, sta *Stat_t) (err error) { + var statLE Stat_LE_t + err = stat(path, &statLE) + copyStat(sta, &statLE) + return +} + +//sys Symlink(path string, link string) (err error) = SYS___SYMLINK_A +//sys Sync() = SYS_SYNC +//sys Truncate(path string, length int64) (err error) = SYS___TRUNCATE_A +//sys Tcgetattr(fildes int, termptr *Termios) (err error) = SYS_TCGETATTR +//sys Tcsetattr(fildes int, when int, termptr *Termios) (err error) = SYS_TCSETATTR +//sys Umask(mask int) (oldmask int) +//sys Unlink(path string) (err error) = SYS___UNLINK_A +//sys Utime(path string, utim *Utimbuf) (err error) = SYS___UTIME_A + +//sys open(path string, mode int, perm uint32) (fd int, err error) = SYS___OPEN_A + +func Open(path string, mode int, perm uint32) (fd int, err error) { + return open(path, mode, perm) +} + +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + wd, err := Getwd() + if err != nil { + return err + } + + if err := Fchdir(dirfd); err != nil { + return err + } + defer Chdir(wd) + + return Mkfifo(path, mode) +} + +//sys remove(path string) (err error) + +func Remove(path string) error { + return remove(path) +} + +const ImplementsGetwd = true + +func Getcwd(buf []byte) (n int, err error) { + var p unsafe.Pointer + if len(buf) > 0 { + p = unsafe.Pointer(&buf[0]) + } else { + p = unsafe.Pointer(&_zero) + } + _, _, e := syscall_syscall(SYS___GETCWD_A, uintptr(p), uintptr(len(buf)), 0) + n = clen(buf) + 1 + if e != 0 { + err = errnoErr(e) + } + return +} + +func Getwd() (wd string, err error) { + var buf [PathMax]byte + n, err := Getcwd(buf[0:]) + if err != nil { + return "", err + } + // Getcwd returns the number of bytes written to buf, including the NUL. 
+ if n < 1 || n > len(buf) || buf[n-1] != 0 { + return "", EINVAL + } + return string(buf[0 : n-1]), nil +} + +func Getgroups() (gids []int, err error) { + n, err := getgroups(0, nil) + if err != nil { + return nil, err + } + if n == 0 { + return nil, nil + } + + // Sanity check group count. Max is 1<<16 on Linux. + if n < 0 || n > 1<<20 { + return nil, EINVAL + } + + a := make([]_Gid_t, n) + n, err = getgroups(n, &a[0]) + if err != nil { + return nil, err + } + gids = make([]int, n) + for i, v := range a[0:n] { + gids[i] = int(v) + } + return +} + +func Setgroups(gids []int) (err error) { + if len(gids) == 0 { + return setgroups(0, nil) + } + + a := make([]_Gid_t, len(gids)) + for i, v := range gids { + a[i] = _Gid_t(v) + } + return setgroups(len(a), &a[0]) +} + +func gettid() uint64 + +func Gettid() (tid int) { + return int(gettid()) +} + +type WaitStatus uint32 + +// Wait status is 7 bits at bottom, either 0 (exited), +// 0x7F (stopped), or a signal number that caused an exit. +// The 0x80 bit is whether there was a core dump. +// An extra number (exit code, signal causing a stop) +// is in the high bits. At least that's the idea. +// There are various irregularities. For example, the +// "continued" status is 0xFFFF, distinguishing itself +// from stopped via the core dump bit. + +const ( + mask = 0x7F + core = 0x80 + exited = 0x00 + stopped = 0x7F + shift = 8 +) + +func (w WaitStatus) Exited() bool { return w&mask == exited } + +func (w WaitStatus) Signaled() bool { return w&mask != stopped && w&mask != exited } + +func (w WaitStatus) Stopped() bool { return w&0xFF == stopped } + +func (w WaitStatus) Continued() bool { return w == 0xFFFF } + +func (w WaitStatus) CoreDump() bool { return w.Signaled() && w&core != 0 } + +func (w WaitStatus) ExitStatus() int { + if !w.Exited() { + return -1 + } + return int(w>>shift) & 0xFF +} + +func (w WaitStatus) Signal() Signal { + if !w.Signaled() { + return -1 + } + return Signal(w & mask) +} + +func (w WaitStatus) StopSignal() Signal { + if !w.Stopped() { + return -1 + } + return Signal(w>>shift) & 0xFF +} + +func (w WaitStatus) TrapCause() int { return -1 } + +//sys waitpid(pid int, wstatus *_C_int, options int) (wpid int, err error) + +func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { + // TODO(mundaym): z/OS doesn't have wait4. I don't think getrusage does what we want. + // At the moment rusage will not be touched. 
+ var status _C_int + wpid, err = waitpid(pid, &status, options) + if wstatus != nil { + *wstatus = WaitStatus(status) + } + return +} + +//sysnb gettimeofday(tv *timeval_zos) (err error) + +func Gettimeofday(tv *Timeval) (err error) { + var tvz timeval_zos + err = gettimeofday(&tvz) + tv.Sec = tvz.Sec + tv.Usec = int64(tvz.Usec) + return +} + +func Time(t *Time_t) (tt Time_t, err error) { + var tv Timeval + err = Gettimeofday(&tv) + if err != nil { + return 0, err + } + if t != nil { + *t = Time_t(tv.Sec) + } + return Time_t(tv.Sec), nil +} + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { //fix + return Timeval{Sec: sec, Usec: usec} +} + +//sysnb pipe(p *[2]_C_int) (err error) + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe(&pp) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sys utimes(path string, timeval *[2]Timeval) (err error) = SYS___UTIMES_A + +func Utimes(path string, tv []Timeval) (err error) { + if len(tv) != 2 { + return EINVAL + } + return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) +} + +func UtimesNano(path string, ts []Timespec) error { + if len(ts) != 2 { + return EINVAL + } + // Not as efficient as it could be because Timespec and + // Timeval have different types in the different OSes + tv := [2]Timeval{ + NsecToTimeval(TimespecToNsec(ts[0])), + NsecToTimeval(TimespecToNsec(ts[1])), + } + return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) +} + +func Getsockname(fd int) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + if err = getsockname(fd, &rsa, &len); err != nil { + return + } + // TODO(neeilan) : Remove this 0 ( added to get sys/unix compiling on z/OS ) + return anyToSockaddr(0, &rsa) +} + +const ( + // identifier constants + nwmHeaderIdentifier = 0xd5e6d4c8 + nwmFilterIdentifier = 0xd5e6d4c6 + nwmTCPConnIdentifier = 0xd5e6d4c3 + nwmRecHeaderIdentifier = 0xd5e6d4d9 + nwmIPStatsIdentifier = 0xd5e6d4c9d7e2e340 + nwmIPGStatsIdentifier = 0xd5e6d4c9d7c7e2e3 + nwmTCPStatsIdentifier = 0xd5e6d4e3c3d7e2e3 + nwmUDPStatsIdentifier = 0xd5e6d4e4c4d7e2e3 + nwmICMPGStatsEntry = 0xd5e6d4c9c3d4d7c7 + nwmICMPTStatsEntry = 0xd5e6d4c9c3d4d7e3 + + // nwmHeader constants + nwmVersion1 = 1 + nwmVersion2 = 2 + nwmCurrentVer = 2 + + nwmTCPConnType = 1 + nwmGlobalStatsType = 14 + + // nwmFilter constants + nwmFilterLclAddrMask = 0x20000000 // Local address + nwmFilterSrcAddrMask = 0x20000000 // Source address + nwmFilterLclPortMask = 0x10000000 // Local port + nwmFilterSrcPortMask = 0x10000000 // Source port + + // nwmConnEntry constants + nwmTCPStateClosed = 1 + nwmTCPStateListen = 2 + nwmTCPStateSynSent = 3 + nwmTCPStateSynRcvd = 4 + nwmTCPStateEstab = 5 + nwmTCPStateFinWait1 = 6 + nwmTCPStateFinWait2 = 7 + nwmTCPStateClosWait = 8 + nwmTCPStateLastAck = 9 + nwmTCPStateClosing = 10 + nwmTCPStateTimeWait = 11 + nwmTCPStateDeletTCB = 12 + + // Existing constants on linux + BPF_TCP_CLOSE = 1 + BPF_TCP_LISTEN = 2 + BPF_TCP_SYN_SENT = 3 + BPF_TCP_SYN_RECV = 4 + BPF_TCP_ESTABLISHED = 5 + BPF_TCP_FIN_WAIT1 = 6 + BPF_TCP_FIN_WAIT2 = 7 + BPF_TCP_CLOSE_WAIT = 8 + BPF_TCP_LAST_ACK = 9 + BPF_TCP_CLOSING = 10 + BPF_TCP_TIME_WAIT = 11 + BPF_TCP_NEW_SYN_RECV = -1 + BPF_TCP_MAX_STATES = -2 +) + +type nwmTriplet struct { + offset uint32 + length uint32 + number uint32 +} + +type nwmQuadruplet struct { + offset uint32 + length uint32 + number uint32 + match uint32 +} + +type nwmHeader struct { + 
ident uint32 + length uint32 + version uint16 + nwmType uint16 + bytesNeeded uint32 + options uint32 + _ [16]byte + inputDesc nwmTriplet + outputDesc nwmQuadruplet +} + +type nwmFilter struct { + ident uint32 + flags uint32 + resourceName [8]byte + resourceId uint32 + listenerId uint32 + local [28]byte // union of sockaddr4 and sockaddr6 + remote [28]byte // union of sockaddr4 and sockaddr6 + _ uint16 + _ uint16 + asid uint16 + _ [2]byte + tnLuName [8]byte + tnMonGrp uint32 + tnAppl [8]byte + applData [40]byte + nInterface [16]byte + dVipa [16]byte + dVipaPfx uint16 + dVipaPort uint16 + dVipaFamily byte + _ [3]byte + destXCF [16]byte + destXCFPfx uint16 + destXCFFamily byte + _ [1]byte + targIP [16]byte + targIPPfx uint16 + targIPFamily byte + _ [1]byte + _ [20]byte +} + +type nwmRecHeader struct { + ident uint32 + length uint32 + number byte + _ [3]byte +} + +type nwmTCPStatsEntry struct { + ident uint64 + currEstab uint32 + activeOpened uint32 + passiveOpened uint32 + connClosed uint32 + estabResets uint32 + attemptFails uint32 + passiveDrops uint32 + timeWaitReused uint32 + inSegs uint64 + predictAck uint32 + predictData uint32 + inDupAck uint32 + inBadSum uint32 + inBadLen uint32 + inShort uint32 + inDiscOldTime uint32 + inAllBeforeWin uint32 + inSomeBeforeWin uint32 + inAllAfterWin uint32 + inSomeAfterWin uint32 + inOutOfOrder uint32 + inAfterClose uint32 + inWinProbes uint32 + inWinUpdates uint32 + outWinUpdates uint32 + outSegs uint64 + outDelayAcks uint32 + outRsts uint32 + retransSegs uint32 + retransTimeouts uint32 + retransDrops uint32 + pmtuRetrans uint32 + pmtuErrors uint32 + outWinProbes uint32 + probeDrops uint32 + keepAliveProbes uint32 + keepAliveDrops uint32 + finwait2Drops uint32 + acceptCount uint64 + inBulkQSegs uint64 + inDiscards uint64 + connFloods uint32 + connStalls uint32 + cfgEphemDef uint16 + ephemInUse uint16 + ephemHiWater uint16 + flags byte + _ [1]byte + ephemExhaust uint32 + smcRCurrEstabLnks uint32 + smcRLnkActTimeOut uint32 + smcRActLnkOpened uint32 + smcRPasLnkOpened uint32 + smcRLnksClosed uint32 + smcRCurrEstab uint32 + smcRActiveOpened uint32 + smcRPassiveOpened uint32 + smcRConnClosed uint32 + smcRInSegs uint64 + smcROutSegs uint64 + smcRInRsts uint32 + smcROutRsts uint32 + smcDCurrEstabLnks uint32 + smcDActLnkOpened uint32 + smcDPasLnkOpened uint32 + smcDLnksClosed uint32 + smcDCurrEstab uint32 + smcDActiveOpened uint32 + smcDPassiveOpened uint32 + smcDConnClosed uint32 + smcDInSegs uint64 + smcDOutSegs uint64 + smcDInRsts uint32 + smcDOutRsts uint32 +} + +type nwmConnEntry struct { + ident uint32 + local [28]byte // union of sockaddr4 and sockaddr6 + remote [28]byte // union of sockaddr4 and sockaddr6 + startTime [8]byte // uint64, changed to prevent padding from being inserted + lastActivity [8]byte // uint64 + bytesIn [8]byte // uint64 + bytesOut [8]byte // uint64 + inSegs [8]byte // uint64 + outSegs [8]byte // uint64 + state uint16 + activeOpen byte + flag01 byte + outBuffered uint32 + inBuffered uint32 + maxSndWnd uint32 + reXmtCount uint32 + congestionWnd uint32 + ssThresh uint32 + roundTripTime uint32 + roundTripVar uint32 + sendMSS uint32 + sndWnd uint32 + rcvBufSize uint32 + sndBufSize uint32 + outOfOrderCount uint32 + lcl0WindowCount uint32 + rmt0WindowCount uint32 + dupacks uint32 + flag02 byte + sockOpt6Cont byte + asid uint16 + resourceName [8]byte + resourceId uint32 + subtask uint32 + sockOpt byte + sockOpt6 byte + clusterConnFlag byte + proto byte + targetAppl [8]byte + luName [8]byte + clientUserId [8]byte + logMode [8]byte + 
timeStamp uint32 + timeStampAge uint32 + serverResourceId uint32 + intfName [16]byte + ttlsStatPol byte + ttlsStatConn byte + ttlsSSLProt uint16 + ttlsNegCiph [2]byte + ttlsSecType byte + ttlsFIPS140Mode byte + ttlsUserID [8]byte + applData [40]byte + inOldestTime [8]byte // uint64 + outOldestTime [8]byte // uint64 + tcpTrustedPartner byte + _ [3]byte + bulkDataIntfName [16]byte + ttlsNegCiph4 [4]byte + smcReason uint32 + lclSMCLinkId uint32 + rmtSMCLinkId uint32 + smcStatus byte + smcFlags byte + _ [2]byte + rcvWnd uint32 + lclSMCBufSz uint32 + rmtSMCBufSz uint32 + ttlsSessID [32]byte + ttlsSessIDLen int16 + _ [1]byte + smcDStatus byte + smcDReason uint32 +} + +var svcNameTable [][]byte = [][]byte{ + []byte("\xc5\xe9\xc2\xd5\xd4\xc9\xc6\xf4"), // svc_EZBNMIF4 +} + +const ( + svc_EZBNMIF4 = 0 +) + +func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) { + jobname := []byte("\x5c\x40\x40\x40\x40\x40\x40\x40") // "*" + responseBuffer := [4096]byte{0} + var bufferAlet, reasonCode uint32 = 0, 0 + var bufferLen, returnValue, returnCode int32 = 4096, 0, 0 + + dsa := [18]uint64{0} + var argv [7]unsafe.Pointer + argv[0] = unsafe.Pointer(&jobname[0]) + argv[1] = unsafe.Pointer(&responseBuffer[0]) + argv[2] = unsafe.Pointer(&bufferAlet) + argv[3] = unsafe.Pointer(&bufferLen) + argv[4] = unsafe.Pointer(&returnValue) + argv[5] = unsafe.Pointer(&returnCode) + argv[6] = unsafe.Pointer(&reasonCode) + + request := (*struct { + header nwmHeader + filter nwmFilter + })(unsafe.Pointer(&responseBuffer[0])) + + EZBNMIF4 := svcLoad(&svcNameTable[svc_EZBNMIF4][0]) + if EZBNMIF4 == nil { + return nil, errnoErr(EINVAL) + } + + // GetGlobalStats EZBNMIF4 call + request.header.ident = nwmHeaderIdentifier + request.header.length = uint32(unsafe.Sizeof(request.header)) + request.header.version = nwmCurrentVer + request.header.nwmType = nwmGlobalStatsType + request.header.options = 0x80000000 + + svcCall(EZBNMIF4, &argv[0], &dsa[0]) + + // outputDesc field is filled by EZBNMIF4 on success + if returnCode != 0 || request.header.outputDesc.offset == 0 { + return nil, errnoErr(EINVAL) + } + + // Check that EZBNMIF4 returned a nwmRecHeader + recHeader := (*nwmRecHeader)(unsafe.Pointer(&responseBuffer[request.header.outputDesc.offset])) + if recHeader.ident != nwmRecHeaderIdentifier { + return nil, errnoErr(EINVAL) + } + + // Parse nwmTriplets to get offsets of returned entries + var sections []*uint64 + var sectionDesc *nwmTriplet = (*nwmTriplet)(unsafe.Pointer(&responseBuffer[0])) + for i := uint32(0); i < uint32(recHeader.number); i++ { + offset := request.header.outputDesc.offset + uint32(unsafe.Sizeof(*recHeader)) + i*uint32(unsafe.Sizeof(*sectionDesc)) + sectionDesc = (*nwmTriplet)(unsafe.Pointer(&responseBuffer[offset])) + for j := uint32(0); j < sectionDesc.number; j++ { + offset = request.header.outputDesc.offset + sectionDesc.offset + j*sectionDesc.length + sections = append(sections, (*uint64)(unsafe.Pointer(&responseBuffer[offset]))) + } + } + + // Find nwmTCPStatsEntry in returned entries + var tcpStats *nwmTCPStatsEntry = nil + for _, ptr := range sections { + switch *ptr { + case nwmTCPStatsIdentifier: + if tcpStats != nil { + return nil, errnoErr(EINVAL) + } + tcpStats = (*nwmTCPStatsEntry)(unsafe.Pointer(ptr)) + case nwmIPStatsIdentifier: + case nwmIPGStatsIdentifier: + case nwmUDPStatsIdentifier: + case nwmICMPGStatsEntry: + case nwmICMPTStatsEntry: + default: + return nil, errnoErr(EINVAL) + } + } + if tcpStats == nil { + return nil, errnoErr(EINVAL) + } + + // GetConnectionDetail EZBNMIF4 call + 
responseBuffer = [4096]byte{0} + dsa = [18]uint64{0} + bufferAlet, reasonCode = 0, 0 + bufferLen, returnValue, returnCode = 4096, 0, 0 + nameptr := (*uint32)(unsafe.Pointer(uintptr(0x21c))) // Get jobname of current process + nameptr = (*uint32)(unsafe.Pointer(uintptr(*nameptr + 12))) + argv[0] = unsafe.Pointer(uintptr(*nameptr)) + + request.header.ident = nwmHeaderIdentifier + request.header.length = uint32(unsafe.Sizeof(request.header)) + request.header.version = nwmCurrentVer + request.header.nwmType = nwmTCPConnType + request.header.options = 0x80000000 + + request.filter.ident = nwmFilterIdentifier + + var localSockaddr RawSockaddrAny + socklen := _Socklen(SizeofSockaddrAny) + err := getsockname(fd, &localSockaddr, &socklen) + if err != nil { + return nil, errnoErr(EINVAL) + } + if localSockaddr.Addr.Family == AF_INET { + localSockaddr := (*RawSockaddrInet4)(unsafe.Pointer(&localSockaddr.Addr)) + localSockFilter := (*RawSockaddrInet4)(unsafe.Pointer(&request.filter.local[0])) + localSockFilter.Family = AF_INET + var i int + for i = 0; i < 4; i++ { + if localSockaddr.Addr[i] != 0 { + break + } + } + if i != 4 { + request.filter.flags |= nwmFilterLclAddrMask + for i = 0; i < 4; i++ { + localSockFilter.Addr[i] = localSockaddr.Addr[i] + } + } + if localSockaddr.Port != 0 { + request.filter.flags |= nwmFilterLclPortMask + localSockFilter.Port = localSockaddr.Port + } + } else if localSockaddr.Addr.Family == AF_INET6 { + localSockaddr := (*RawSockaddrInet6)(unsafe.Pointer(&localSockaddr.Addr)) + localSockFilter := (*RawSockaddrInet6)(unsafe.Pointer(&request.filter.local[0])) + localSockFilter.Family = AF_INET6 + var i int + for i = 0; i < 16; i++ { + if localSockaddr.Addr[i] != 0 { + break + } + } + if i != 16 { + request.filter.flags |= nwmFilterLclAddrMask + for i = 0; i < 16; i++ { + localSockFilter.Addr[i] = localSockaddr.Addr[i] + } + } + if localSockaddr.Port != 0 { + request.filter.flags |= nwmFilterLclPortMask + localSockFilter.Port = localSockaddr.Port + } + } + + svcCall(EZBNMIF4, &argv[0], &dsa[0]) + + // outputDesc field is filled by EZBNMIF4 on success + if returnCode != 0 || request.header.outputDesc.offset == 0 { + return nil, errnoErr(EINVAL) + } + + // Check that EZBNMIF4 returned a nwmConnEntry + conn := (*nwmConnEntry)(unsafe.Pointer(&responseBuffer[request.header.outputDesc.offset])) + if conn.ident != nwmTCPConnIdentifier { + return nil, errnoErr(EINVAL) + } + + // Copy data from the returned data structures into tcpInfo + // Stats from nwmConnEntry are specific to that connection. + // Stats from nwmTCPStatsEntry are global (to the interface?) + // Fields may not be an exact match. Some fields have no equivalent. 
+ var tcpinfo TCPInfo + tcpinfo.State = uint8(conn.state) + tcpinfo.Ca_state = 0 // dummy + tcpinfo.Retransmits = uint8(tcpStats.retransSegs) + tcpinfo.Probes = uint8(tcpStats.outWinProbes) + tcpinfo.Backoff = 0 // dummy + tcpinfo.Options = 0 // dummy + tcpinfo.Rto = tcpStats.retransTimeouts + tcpinfo.Ato = tcpStats.outDelayAcks + tcpinfo.Snd_mss = conn.sendMSS + tcpinfo.Rcv_mss = conn.sendMSS // dummy + tcpinfo.Unacked = 0 // dummy + tcpinfo.Sacked = 0 // dummy + tcpinfo.Lost = 0 // dummy + tcpinfo.Retrans = conn.reXmtCount + tcpinfo.Fackets = 0 // dummy + tcpinfo.Last_data_sent = uint32(*(*uint64)(unsafe.Pointer(&conn.lastActivity[0]))) + tcpinfo.Last_ack_sent = uint32(*(*uint64)(unsafe.Pointer(&conn.outOldestTime[0]))) + tcpinfo.Last_data_recv = uint32(*(*uint64)(unsafe.Pointer(&conn.inOldestTime[0]))) + tcpinfo.Last_ack_recv = uint32(*(*uint64)(unsafe.Pointer(&conn.inOldestTime[0]))) + tcpinfo.Pmtu = conn.sendMSS // dummy, NWMIfRouteMtu is a candidate + tcpinfo.Rcv_ssthresh = conn.ssThresh + tcpinfo.Rtt = conn.roundTripTime + tcpinfo.Rttvar = conn.roundTripVar + tcpinfo.Snd_ssthresh = conn.ssThresh // dummy + tcpinfo.Snd_cwnd = conn.congestionWnd + tcpinfo.Advmss = conn.sendMSS // dummy + tcpinfo.Reordering = 0 // dummy + tcpinfo.Rcv_rtt = conn.roundTripTime // dummy + tcpinfo.Rcv_space = conn.sendMSS // dummy + tcpinfo.Total_retrans = conn.reXmtCount + + svcUnload(&svcNameTable[svc_EZBNMIF4][0], EZBNMIF4) + + return &tcpinfo, nil +} + +// GetsockoptString returns the string value of the socket option opt for the +// socket associated with fd at the given socket level. +func GetsockoptString(fd, level, opt int) (string, error) { + buf := make([]byte, 256) + vallen := _Socklen(len(buf)) + err := getsockopt(fd, level, opt, unsafe.Pointer(&buf[0]), &vallen) + if err != nil { + return "", err + } + + return string(buf[:vallen-1]), nil +} + +func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { + var msg Msghdr + var rsa RawSockaddrAny + msg.Name = (*byte)(unsafe.Pointer(&rsa)) + msg.Namelen = SizeofSockaddrAny + var iov Iovec + if len(p) > 0 { + iov.Base = (*byte)(unsafe.Pointer(&p[0])) + iov.SetLen(len(p)) + } + var dummy byte + if len(oob) > 0 { + // receive at least one normal byte + if len(p) == 0 { + iov.Base = &dummy + iov.SetLen(1) + } + msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + msg.SetControllen(len(oob)) + } + msg.Iov = &iov + msg.Iovlen = 1 + if n, err = recvmsg(fd, &msg, flags); err != nil { + return + } + oobn = int(msg.Controllen) + recvflags = int(msg.Flags) + // source address is only specified if the socket is unconnected + if rsa.Addr.Family != AF_UNSPEC { + // TODO(neeilan): Remove 0 arg added to get this compiling on z/OS + from, err = anyToSockaddr(0, &rsa) + } + return +} + +func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { + _, err = SendmsgN(fd, p, oob, to, flags) + return +} + +func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { + var ptr unsafe.Pointer + var salen _Socklen + if to != nil { + var err error + ptr, salen, err = to.sockaddr() + if err != nil { + return 0, err + } + } + var msg Msghdr + msg.Name = (*byte)(unsafe.Pointer(ptr)) + msg.Namelen = int32(salen) + var iov Iovec + if len(p) > 0 { + iov.Base = (*byte)(unsafe.Pointer(&p[0])) + iov.SetLen(len(p)) + } + var dummy byte + if len(oob) > 0 { + // send at least one normal byte + if len(p) == 0 { + iov.Base = &dummy + iov.SetLen(1) + } + msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + 
msg.SetControllen(len(oob)) + } + msg.Iov = &iov + msg.Iovlen = 1 + if n, err = sendmsg(fd, &msg, flags); err != nil { + return 0, err + } + if len(oob) > 0 && len(p) == 0 { + n = 0 + } + return n, nil +} + +func Opendir(name string) (uintptr, error) { + p, err := BytePtrFromString(name) + if err != nil { + return 0, err + } + dir, _, e := syscall_syscall(SYS___OPENDIR_A, uintptr(unsafe.Pointer(p)), 0, 0) + runtime.KeepAlive(unsafe.Pointer(p)) + if e != 0 { + err = errnoErr(e) + } + return dir, err +} + +// clearsyscall.Errno resets the errno value to 0. +func clearErrno() + +func Readdir(dir uintptr) (*Dirent, error) { + var ent Dirent + var res uintptr + // __readdir_r_a returns errno at the end of the directory stream, rather than 0. + // Therefore to avoid false positives we clear errno before calling it. + + // TODO(neeilan): Commented this out to get sys/unix compiling on z/OS. Uncomment and fix. Error: "undefined: clearsyscall" + //clearsyscall.Errno() // TODO(mundaym): check pre-emption rules. + + e, _, _ := syscall_syscall(SYS___READDIR_R_A, dir, uintptr(unsafe.Pointer(&ent)), uintptr(unsafe.Pointer(&res))) + var err error + if e != 0 { + err = errnoErr(Errno(e)) + } + if res == 0 { + return nil, err + } + return &ent, err +} + +func Closedir(dir uintptr) error { + _, _, e := syscall_syscall(SYS_CLOSEDIR, dir, 0, 0) + if e != 0 { + return errnoErr(e) + } + return nil +} + +func Seekdir(dir uintptr, pos int) { + _, _, _ = syscall_syscall(SYS_SEEKDIR, dir, uintptr(pos), 0) +} + +func Telldir(dir uintptr) (int, error) { + p, _, e := syscall_syscall(SYS_TELLDIR, dir, 0, 0) + pos := int(p) + if pos == -1 { + return pos, errnoErr(e) + } + return pos, nil +} + +// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. +func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { + // struct flock is packed on z/OS. We can't emulate that in Go so + // instead we pack it here. 
+ var flock [24]byte + *(*int16)(unsafe.Pointer(&flock[0])) = lk.Type + *(*int16)(unsafe.Pointer(&flock[2])) = lk.Whence + *(*int64)(unsafe.Pointer(&flock[4])) = lk.Start + *(*int64)(unsafe.Pointer(&flock[12])) = lk.Len + *(*int32)(unsafe.Pointer(&flock[20])) = lk.Pid + _, _, errno := syscall_syscall(SYS_FCNTL, fd, uintptr(cmd), uintptr(unsafe.Pointer(&flock))) + lk.Type = *(*int16)(unsafe.Pointer(&flock[0])) + lk.Whence = *(*int16)(unsafe.Pointer(&flock[2])) + lk.Start = *(*int64)(unsafe.Pointer(&flock[4])) + lk.Len = *(*int64)(unsafe.Pointer(&flock[12])) + lk.Pid = *(*int32)(unsafe.Pointer(&flock[20])) + if errno == 0 { + return nil + } + return errno +} + +func Flock(fd int, how int) error { + + var flock_type int16 + var fcntl_cmd int + + switch how { + case LOCK_SH | LOCK_NB: + flock_type = F_RDLCK + fcntl_cmd = F_SETLK + case LOCK_EX | LOCK_NB: + flock_type = F_WRLCK + fcntl_cmd = F_SETLK + case LOCK_EX: + flock_type = F_WRLCK + fcntl_cmd = F_SETLKW + case LOCK_UN: + flock_type = F_UNLCK + fcntl_cmd = F_SETLKW + default: + } + + flock := Flock_t{ + Type: int16(flock_type), + Whence: int16(0), + Start: int64(0), + Len: int64(0), + Pid: int32(Getppid()), + } + + err := FcntlFlock(uintptr(fd), fcntl_cmd, &flock) + return err +} + +func Mlock(b []byte) (err error) { + _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func Mlock2(b []byte, flags int) (err error) { + _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func Mlockall(flags int) (err error) { + _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func Munlock(b []byte) (err error) { + _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_SWAP, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func Munlockall() (err error) { + _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_SWAP, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func ClockGettime(clockid int32, ts *Timespec) error { + + var ticks_per_sec uint32 = 100 //TODO(kenan): value is currently hardcoded; need sysconf() call otherwise + var nsec_per_sec int64 = 1000000000 + + if ts == nil { + return EFAULT + } + if clockid == CLOCK_REALTIME || clockid == CLOCK_MONOTONIC { + var nanotime int64 = runtime.Nanotime1() + ts.Sec = nanotime / nsec_per_sec + ts.Nsec = nanotime % nsec_per_sec + } else if clockid == CLOCK_PROCESS_CPUTIME_ID || clockid == CLOCK_THREAD_CPUTIME_ID { + var tm Tms + _, err := Times(&tm) + if err != nil { + return EFAULT + } + ts.Sec = int64(tm.Utime / ticks_per_sec) + ts.Nsec = int64(tm.Utime) * nsec_per_sec / int64(ticks_per_sec) + } else { + return EINVAL + } + return nil +} + +func Statfs(path string, stat *Statfs_t) (err error) { + fd, err := open(path, O_RDONLY, 0) + defer Close(fd) + if err != nil { + return err + } + return Fstatfs(fd, stat) +} + +var ( + Stdin = 0 + Stdout = 1 + Stderr = 2 +) + +// Do the interface allocations only once for common +// Errno values. +var ( + errEAGAIN error = syscall.EAGAIN + errEINVAL error = syscall.EINVAL + errENOENT error = syscall.ENOENT +) + +var ( + signalNameMapOnce sync.Once + signalNameMap map[string]syscall.Signal +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. 
+func errnoErr(e Errno) error { + switch e { + case 0: + return nil + case EAGAIN: + return errEAGAIN + case EINVAL: + return errEINVAL + case ENOENT: + return errENOENT + } + return e +} + +// ErrnoName returns the error name for error number e. +func ErrnoName(e Errno) string { + i := sort.Search(len(errorList), func(i int) bool { + return errorList[i].num >= e + }) + if i < len(errorList) && errorList[i].num == e { + return errorList[i].name + } + return "" +} + +// SignalName returns the signal name for signal number s. +func SignalName(s syscall.Signal) string { + i := sort.Search(len(signalList), func(i int) bool { + return signalList[i].num >= s + }) + if i < len(signalList) && signalList[i].num == s { + return signalList[i].name + } + return "" +} + +// SignalNum returns the syscall.Signal for signal named s, +// or 0 if a signal with such name is not found. +// The signal name should start with "SIG". +func SignalNum(s string) syscall.Signal { + signalNameMapOnce.Do(func() { + signalNameMap = make(map[string]syscall.Signal, len(signalList)) + for _, signal := range signalList { + signalNameMap[signal.name] = signal.num + } + }) + return signalNameMap[s] +} + +// clen returns the index of the first NULL byte in n or len(n) if n contains no NULL byte. +func clen(n []byte) int { + i := bytes.IndexByte(n, 0) + if i == -1 { + i = len(n) + } + return i +} + +// Mmap manager, for use by operating system-specific implementations. + +type mmapper struct { + sync.Mutex + active map[*byte][]byte // active mappings; key is last byte in mapping + mmap func(addr, length uintptr, prot, flags, fd int, offset int64) (uintptr, error) + munmap func(addr uintptr, length uintptr) error +} + +func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + if length <= 0 { + return nil, EINVAL + } + + // Map the requested memory. + addr, errno := m.mmap(0, uintptr(length), prot, flags, fd, offset) + if errno != nil { + return nil, errno + } + + // Slice memory layout + var sl = struct { + addr uintptr + len int + cap int + }{addr, length, length} + + // Use unsafe to turn sl into a []byte. + b := *(*[]byte)(unsafe.Pointer(&sl)) + + // Register mapping in m and return it. + p := &b[cap(b)-1] + m.Lock() + defer m.Unlock() + m.active[p] = b + return b, nil +} + +func (m *mmapper) Munmap(data []byte) (err error) { + if len(data) == 0 || len(data) != cap(data) { + return EINVAL + } + + // Find the base of the mapping. + p := &data[cap(data)-1] + m.Lock() + defer m.Unlock() + b := m.active[p] + if b == nil || &b[0] != &data[0] { + return EINVAL + } + + // Unmap the memory and update m. + if errno := m.munmap(uintptr(unsafe.Pointer(&b[0])), uintptr(len(b))); errno != nil { + return errno + } + delete(m.active, p) + return nil +} + +func Read(fd int, p []byte) (n int, err error) { + n, err = read(fd, p) + if raceenabled { + if n > 0 { + raceWriteRange(unsafe.Pointer(&p[0]), n) + } + if err == nil { + raceAcquire(unsafe.Pointer(&ioSync)) + } + } + return +} + +func Write(fd int, p []byte) (n int, err error) { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + n, err = write(fd, p) + if raceenabled && n > 0 { + raceReadRange(unsafe.Pointer(&p[0]), n) + } + return +} + +// For testing: clients can set this flag to force +// creation of IPv6 sockets to return EAFNOSUPPORT. +var SocketDisableIPv6 bool + +// Sockaddr represents a socket address. 
+type Sockaddr interface { + sockaddr() (ptr unsafe.Pointer, len _Socklen, err error) // lowercase; only we can define Sockaddrs +} + +// SockaddrInet4 implements the Sockaddr interface for AF_INET type sockets. +type SockaddrInet4 struct { + Port int + Addr [4]byte + raw RawSockaddrInet4 +} + +// SockaddrInet6 implements the Sockaddr interface for AF_INET6 type sockets. +type SockaddrInet6 struct { + Port int + ZoneId uint32 + Addr [16]byte + raw RawSockaddrInet6 +} + +// SockaddrUnix implements the Sockaddr interface for AF_UNIX type sockets. +type SockaddrUnix struct { + Name string + raw RawSockaddrUnix +} + +func Bind(fd int, sa Sockaddr) (err error) { + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return bind(fd, ptr, n) +} + +func Connect(fd int, sa Sockaddr) (err error) { + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return connect(fd, ptr, n) +} + +func Getpeername(fd int) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + if err = getpeername(fd, &rsa, &len); err != nil { + return + } + return anyToSockaddr(fd, &rsa) +} + +func GetsockoptByte(fd, level, opt int) (value byte, err error) { + var n byte + vallen := _Socklen(1) + err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen) + return n, err +} + +func GetsockoptInt(fd, level, opt int) (value int, err error) { + var n int32 + vallen := _Socklen(4) + err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen) + return int(n), err +} + +func GetsockoptInet4Addr(fd, level, opt int) (value [4]byte, err error) { + vallen := _Socklen(4) + err = getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + return value, err +} + +func GetsockoptIPMreq(fd, level, opt int) (*IPMreq, error) { + var value IPMreq + vallen := _Socklen(SizeofIPMreq) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptIPv6Mreq(fd, level, opt int) (*IPv6Mreq, error) { + var value IPv6Mreq + vallen := _Socklen(SizeofIPv6Mreq) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptIPv6MTUInfo(fd, level, opt int) (*IPv6MTUInfo, error) { + var value IPv6MTUInfo + vallen := _Socklen(SizeofIPv6MTUInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptICMPv6Filter(fd, level, opt int) (*ICMPv6Filter, error) { + var value ICMPv6Filter + vallen := _Socklen(SizeofICMPv6Filter) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptLinger(fd, level, opt int) (*Linger, error) { + var linger Linger + vallen := _Socklen(SizeofLinger) + err := getsockopt(fd, level, opt, unsafe.Pointer(&linger), &vallen) + return &linger, err +} + +func GetsockoptTimeval(fd, level, opt int) (*Timeval, error) { + var tv Timeval + vallen := _Socklen(unsafe.Sizeof(tv)) + err := getsockopt(fd, level, opt, unsafe.Pointer(&tv), &vallen) + return &tv, err +} + +func GetsockoptUint64(fd, level, opt int) (value uint64, err error) { + var n uint64 + vallen := _Socklen(8) + err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen) + return n, err +} + +func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + if n, err = recvfrom(fd, p, flags, &rsa, &len); err != nil { + return + } + if rsa.Addr.Family != AF_UNSPEC { + from, err = anyToSockaddr(fd, &rsa) + } + return +} + +func Sendto(fd 
int, p []byte, flags int, to Sockaddr) (err error) { + ptr, n, err := to.sockaddr() + if err != nil { + return err + } + return sendto(fd, p, flags, ptr, n) +} + +func SetsockoptByte(fd, level, opt int, value byte) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(&value), 1) +} + +func SetsockoptInt(fd, level, opt int, value int) (err error) { + var n = int32(value) + return setsockopt(fd, level, opt, unsafe.Pointer(&n), 4) +} + +func SetsockoptInet4Addr(fd, level, opt int, value [4]byte) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(&value[0]), 4) +} + +func SetsockoptIPMreq(fd, level, opt int, mreq *IPMreq) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(mreq), SizeofIPMreq) +} + +func SetsockoptIPv6Mreq(fd, level, opt int, mreq *IPv6Mreq) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(mreq), SizeofIPv6Mreq) +} + +func SetsockoptICMPv6Filter(fd, level, opt int, filter *ICMPv6Filter) error { + return setsockopt(fd, level, opt, unsafe.Pointer(filter), SizeofICMPv6Filter) +} + +func SetsockoptLinger(fd, level, opt int, l *Linger) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(l), SizeofLinger) +} + +func SetsockoptString(fd, level, opt int, s string) (err error) { + var p unsafe.Pointer + if len(s) > 0 { + p = unsafe.Pointer(&[]byte(s)[0]) + } + return setsockopt(fd, level, opt, p, uintptr(len(s))) +} + +func SetsockoptTimeval(fd, level, opt int, tv *Timeval) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(tv), unsafe.Sizeof(*tv)) +} + +func SetsockoptUint64(fd, level, opt int, value uint64) (err error) { + return setsockopt(fd, level, opt, unsafe.Pointer(&value), 8) +} + +func Socket(domain, typ, proto int) (fd int, err error) { + if domain == AF_INET6 && SocketDisableIPv6 { + return -1, EAFNOSUPPORT + } + fd, err = socket(domain, typ, proto) + return +} + +func Socketpair(domain, typ, proto int) (fd [2]int, err error) { + var fdx [2]int32 + err = socketpair(domain, typ, proto, &fdx) + if err == nil { + fd[0] = int(fdx[0]) + fd[1] = int(fdx[1]) + } + return +} + +var ioSync int64 + +func CloseOnExec(fd int) { fcntl(fd, F_SETFD, FD_CLOEXEC) } + +func SetNonblock(fd int, nonblocking bool) (err error) { + flag, err := fcntl(fd, F_GETFL, 0) + if err != nil { + return err + } + if nonblocking { + flag |= O_NONBLOCK + } else { + flag &= ^O_NONBLOCK + } + _, err = fcntl(fd, F_SETFL, flag) + return err +} + +// Exec calls execve(2), which replaces the calling executable in the process +// tree. argv0 should be the full path to an executable ("/bin/ls") and the +// executable name should also be the first argument in argv (["ls", "-l"]). +// envv are the environment variables that should be passed to the new +// process (["USER=go", "PWD=/tmp"]). +func Exec(argv0 string, argv []string, envv []string) error { + return syscall.Exec(argv0, argv, envv) +} diff --git a/vendor/golang.org/x/sys/unix/timestruct.go b/vendor/golang.org/x/sys/unix/timestruct.go index 103604299e2..3d893040553 100644 --- a/vendor/golang.org/x/sys/unix/timestruct.go +++ b/vendor/golang.org/x/sys/unix/timestruct.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/xattr_bsd.go b/vendor/golang.org/x/sys/unix/xattr_bsd.go index 30c1d71f4ed..25df1e37801 100644 --- a/vendor/golang.org/x/sys/unix/xattr_bsd.go +++ b/vendor/golang.org/x/sys/unix/xattr_bsd.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build freebsd || netbsd // +build freebsd netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go index 104994bc6a9..ca9799b79ef 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go @@ -1,6 +1,7 @@ // mkerrors.sh -maix32 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build ppc && aix // +build ppc,aix // Created by cgo -godefs - DO NOT EDIT diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go index 4fc8d306493..200c8c26fe6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go @@ -1,6 +1,7 @@ // mkerrors.sh -maix64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build ppc64 && aix // +build ppc64,aix // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go index ec376f51bc4..7ee196f7fcc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go @@ -1,6 +1,7 @@ // mkerrors.sh -m32 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && darwin // +build 386,darwin // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index dcb96c26c08..991996b6091 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && darwin // +build amd64,darwin // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -775,15 +776,24 @@ const ( IPV6_2292PKTINFO = 0x13 IPV6_2292PKTOPTIONS = 0x19 IPV6_2292RTHDR = 0x18 + IPV6_3542DSTOPTS = 0x32 + IPV6_3542HOPLIMIT = 0x2f + IPV6_3542HOPOPTS = 0x31 + IPV6_3542NEXTHOP = 0x30 + IPV6_3542PKTINFO = 0x2e + IPV6_3542RTHDR = 0x33 IPV6_ADDR_MC_FLAGS_PREFIX = 0x20 IPV6_ADDR_MC_FLAGS_TRANSIENT = 0x10 IPV6_ADDR_MC_FLAGS_UNICAST_BASED = 0x30 + IPV6_AUTOFLOWLABEL = 0x3b IPV6_BINDV6ONLY = 0x1b IPV6_BOUND_IF = 0x7d IPV6_CHECKSUM = 0x1a IPV6_DEFAULT_MULTICAST_HOPS = 0x1 IPV6_DEFAULT_MULTICAST_LOOP = 0x1 IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 IPV6_FAITH = 0x1d IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 @@ -795,6 +805,8 @@ const ( IPV6_FW_GET = 0x22 IPV6_FW_ZERO = 0x21 IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 IPV6_IPSEC_POLICY = 0x1c IPV6_JOIN_GROUP = 0xc IPV6_LEAVE_GROUP = 0xd @@ -806,20 +818,34 @@ const ( IPV6_MAX_SOCK_SRC_FILTER = 0x80 IPV6_MIN_MEMBERSHIPS = 0x1f IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_IF = 0x9 IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e IPV6_PORTRANGE = 0xe IPV6_PORTRANGE_DEFAULT = 0x0 IPV6_PORTRANGE_HIGH = 0x1 IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x3d + IPV6_RECVRTHDR = 0x26 IPV6_RECVTCLASS = 0x23 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x39 IPV6_RTHDR_LOOSE = 0x0 IPV6_RTHDR_STRICT = 0x1 IPV6_RTHDR_TYPE_0 = 0x0 IPV6_SOCKOPT_RESERVED1 = 0x3 IPV6_TCLASS = 0x24 IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a IPV6_V6ONLY = 0x1b IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go index 03feefbf8c9..e748cb11057 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go @@ -1,6 +1,7 @@ // mkerrors.sh // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && darwin // +build arm,darwin // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index 8602b136332..e644eaf5e75 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && darwin // +build arm64,darwin // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -775,15 +776,24 @@ const ( IPV6_2292PKTINFO = 0x13 IPV6_2292PKTOPTIONS = 0x19 IPV6_2292RTHDR = 0x18 + IPV6_3542DSTOPTS = 0x32 + IPV6_3542HOPLIMIT = 0x2f + IPV6_3542HOPOPTS = 0x31 + IPV6_3542NEXTHOP = 0x30 + IPV6_3542PKTINFO = 0x2e + IPV6_3542RTHDR = 0x33 IPV6_ADDR_MC_FLAGS_PREFIX = 0x20 IPV6_ADDR_MC_FLAGS_TRANSIENT = 0x10 IPV6_ADDR_MC_FLAGS_UNICAST_BASED = 0x30 + IPV6_AUTOFLOWLABEL = 0x3b IPV6_BINDV6ONLY = 0x1b IPV6_BOUND_IF = 0x7d IPV6_CHECKSUM = 0x1a IPV6_DEFAULT_MULTICAST_HOPS = 0x1 IPV6_DEFAULT_MULTICAST_LOOP = 0x1 IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 IPV6_FAITH = 0x1d IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 @@ -795,6 +805,8 @@ const ( IPV6_FW_GET = 0x22 IPV6_FW_ZERO = 0x21 IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 IPV6_IPSEC_POLICY = 0x1c IPV6_JOIN_GROUP = 0xc IPV6_LEAVE_GROUP = 0xd @@ -806,20 +818,34 @@ const ( IPV6_MAX_SOCK_SRC_FILTER = 0x80 IPV6_MIN_MEMBERSHIPS = 0x1f IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_IF = 0x9 IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e IPV6_PORTRANGE = 0xe IPV6_PORTRANGE_DEFAULT = 0x0 IPV6_PORTRANGE_HIGH = 0x1 IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x3d + IPV6_RECVRTHDR = 0x26 IPV6_RECVTCLASS = 0x23 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x39 IPV6_RTHDR_LOOSE = 0x0 IPV6_RTHDR_STRICT = 0x1 IPV6_RTHDR_TYPE_0 = 0x0 IPV6_SOCKOPT_RESERVED1 = 0x3 IPV6_TCLASS = 0x24 IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a IPV6_V6ONLY = 0x1b IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go index f5e91b7abaa..17bba0e44f9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && dragonfly // +build amd64,dragonfly // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index 3689c808481..9c7c5e16546 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -1,6 +1,7 @@ // mkerrors.sh -m32 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && freebsd // +build 386,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -997,6 +998,11 @@ const ( KERN_OSRELEASE = 0x2 KERN_OSTYPE = 0x1 KERN_VERSION = 0x4 + LOCAL_CONNWAIT = 0x4 + LOCAL_CREDS = 0x2 + LOCAL_CREDS_PERSISTENT = 0x3 + LOCAL_PEERCRED = 0x1 + LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 LOCK_NB = 0x4 LOCK_SH = 0x1 @@ -1375,6 +1381,7 @@ const ( SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 SOCK_STREAM = 0x1 + SOL_LOCAL = 0x0 SOL_SOCKET = 0xffff SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index b8f7c3c930a..b265abb25f8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build amd64 && freebsd // +build amd64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -997,6 +998,11 @@ const ( KERN_OSRELEASE = 0x2 KERN_OSTYPE = 0x1 KERN_VERSION = 0x4 + LOCAL_CONNWAIT = 0x4 + LOCAL_CREDS = 0x2 + LOCAL_CREDS_PERSISTENT = 0x3 + LOCAL_PEERCRED = 0x1 + LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 LOCK_NB = 0x4 LOCK_SH = 0x1 @@ -1376,6 +1382,7 @@ const ( SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 SOCK_STREAM = 0x1 + SOL_LOCAL = 0x0 SOL_SOCKET = 0xffff SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index be14bb1a4cd..3df99f285f1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -1,6 +1,7 @@ // mkerrors.sh // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && freebsd // +build arm,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -980,6 +981,11 @@ const ( KERN_OSRELEASE = 0x2 KERN_OSTYPE = 0x1 KERN_VERSION = 0x4 + LOCAL_CONNWAIT = 0x4 + LOCAL_CREDS = 0x2 + LOCAL_CREDS_PERSISTENT = 0x3 + LOCAL_PEERCRED = 0x1 + LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 LOCK_NB = 0x4 LOCK_SH = 0x1 @@ -1016,6 +1022,15 @@ const ( MAP_RESERVED0100 = 0x100 MAP_SHARED = 0x1 MAP_STACK = 0x400 + MCAST_BLOCK_SOURCE = 0x54 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x50 + MCAST_JOIN_SOURCE_GROUP = 0x52 + MCAST_LEAVE_GROUP = 0x51 + MCAST_LEAVE_SOURCE_GROUP = 0x53 + MCAST_UNBLOCK_SOURCE = 0x55 + MCAST_UNDEFINED = 0x0 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MNT_ACLS = 0x8000000 @@ -1341,6 +1356,7 @@ const ( SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 SOCK_STREAM = 0x1 + SOL_LOCAL = 0x0 SOL_SOCKET = 0xffff SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index 7ce9c0081a8..218d39906da 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && freebsd // +build arm64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -997,6 +998,11 @@ const ( KERN_OSRELEASE = 0x2 KERN_OSTYPE = 0x1 KERN_VERSION = 0x4 + LOCAL_CONNWAIT = 0x4 + LOCAL_CREDS = 0x2 + LOCAL_CREDS_PERSISTENT = 0x3 + LOCAL_PEERCRED = 0x1 + LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 LOCK_NB = 0x4 LOCK_SH = 0x1 @@ -1376,6 +1382,7 @@ const ( SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 SOCK_STREAM = 0x1 + SOL_LOCAL = 0x0 SOL_SOCKET = 0xffff SOMAXCONN = 0x80 SO_ACCEPTCONN = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index c5e2f479304..35de419c6db 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1,5 +1,6 @@ // Code generated by mkmerge.go; DO NOT EDIT. 
+//go:build linux // +build linux package unix @@ -244,6 +245,10 @@ const ( BS0 = 0x0 BTRFS_SUPER_MAGIC = 0x9123683e BTRFS_TEST_MAGIC = 0x73727279 + BUS_BLUETOOTH = 0x5 + BUS_HIL = 0x4 + BUS_USB = 0x3 + BUS_VIRTUAL = 0x6 CAN_BCM = 0x2 CAN_EFF_FLAG = 0x80000000 CAN_EFF_ID_BITS = 0x1d @@ -963,11 +968,17 @@ const ( HDIO_SET_XFER = 0x306 HDIO_TRISTATE_HWIF = 0x31b HDIO_UNREGISTER_HWIF = 0x32a + HID_MAX_DESCRIPTOR_SIZE = 0x1000 HOSTFS_SUPER_MAGIC = 0xc0ffee HPFS_SUPER_MAGIC = 0xf995e849 HUGETLBFS_MAGIC = 0x958458f6 IBSHIFT = 0x10 ICMPV6_FILTER = 0x1 + ICMPV6_FILTER_BLOCK = 0x1 + ICMPV6_FILTER_BLOCKOTHERS = 0x3 + ICMPV6_FILTER_PASS = 0x2 + ICMPV6_FILTER_PASSONLY = 0x4 + ICMP_FILTER = 0x1 ICRNL = 0x100 IFA_F_DADFAILED = 0x8 IFA_F_DEPRECATED = 0x20 @@ -2735,6 +2746,9 @@ const ( Z3FOLD_MAGIC = 0x33 ZONEFS_MAGIC = 0x5a4f4653 ZSMALLOC_MAGIC = 0x58295829 + _HIDIOCGRAWNAME_LEN = 0x80 + _HIDIOCGRAWPHYS_LEN = 0x40 + _HIDIOCGRAWUNIQ_LEN = 0x40 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index c8d5b324c75..e91a1a95792 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include -m32 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && linux // +build 386,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -93,6 +94,9 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x80084803 + HIDIOCGRDESC = 0x90044802 + HIDIOCGRDESCSIZE = 0x80044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -485,6 +489,9 @@ const ( X86_FXSR_MAGIC = 0x0 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x80804804 + _HIDIOCGRAWPHYS = 0x80404805 + _HIDIOCGRAWUNIQ = 0x80404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index e605c08b0ec..a9cbac64439 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && linux // +build amd64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -93,6 +94,9 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x80084803 + HIDIOCGRDESC = 0x90044802 + HIDIOCGRDESCSIZE = 0x80044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -485,6 +489,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x80804804 + _HIDIOCGRAWPHYS = 0x80404805 + _HIDIOCGRAWUNIQ = 0x80404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 0279fa1abda..d74f3c15a1d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && linux // +build arm,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -92,6 +93,9 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x80084803 + HIDIOCGRDESC = 0x90044802 + HIDIOCGRDESCSIZE = 0x80044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -491,6 +495,9 @@ const ( WORDSIZE = 0x20 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x80804804 + _HIDIOCGRAWPHYS = 0x80404805 + _HIDIOCGRAWUNIQ = 0x80404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 20c286a1122..e1538995b49 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include -fsigned-char // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && linux // +build arm64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -95,6 +96,9 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x80084803 + HIDIOCGRDESC = 0x90044802 + HIDIOCGRDESCSIZE = 0x80044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -482,6 +486,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x80804804 + _HIDIOCGRAWPHYS = 0x80404805 + _HIDIOCGRAWUNIQ = 0x80404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 1785f33d669..5e8e71ff863 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips && linux // +build mips,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -92,6 +93,9 @@ const ( F_SETOWN = 0x18 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x40084803 + HIDIOCGRDESC = 0x50044802 + HIDIOCGRDESCSIZE = 0x40044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -487,6 +491,9 @@ const ( WORDSIZE = 0x20 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x40804804 + _HIDIOCGRAWPHYS = 0x40404805 + _HIDIOCGRAWUNIQ = 0x40404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index acb1ef1bf55..e670ee14814 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips64 && linux // +build mips64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -92,6 +93,9 @@ const ( F_SETOWN = 0x18 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x40084803 + HIDIOCGRDESC = 0x50044802 + HIDIOCGRDESCSIZE = 0x40044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -487,6 +491,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x40804804 + _HIDIOCGRAWPHYS = 0x40404805 + _HIDIOCGRAWUNIQ = 0x40404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 468a4e68c7f..dd11eacb81e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build mips64le && linux // +build mips64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -92,6 +93,9 @@ const ( F_SETOWN = 0x18 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x40084803 + HIDIOCGRDESC = 0x50044802 + HIDIOCGRDESCSIZE = 0x40044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -487,6 +491,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x40804804 + _HIDIOCGRAWPHYS = 0x40404805 + _HIDIOCGRAWUNIQ = 0x40404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 6c9c7f2c76b..a0a5b22ae93 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mipsle && linux // +build mipsle,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -92,6 +93,9 @@ const ( F_SETOWN = 0x18 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x40084803 + HIDIOCGRDESC = 0x50044802 + HIDIOCGRDESCSIZE = 0x40044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -487,6 +491,9 @@ const ( WORDSIZE = 0x20 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x40804804 + _HIDIOCGRAWPHYS = 0x40404805 + _HIDIOCGRAWUNIQ = 0x40404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 8961206e5f4..e60102f6a92 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build ppc64 && linux // +build ppc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -92,6 +93,9 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x40084803 + HIDIOCGRDESC = 0x50044802 + HIDIOCGRDESCSIZE = 0x40044801 HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -547,6 +551,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4000 XTABS = 0xc00 + _HIDIOCGRAWNAME = 0x40804804 + _HIDIOCGRAWPHYS = 0x40404805 + _HIDIOCGRAWUNIQ = 0x40404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 6bcf79dc2e6..838ff4ea6d0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build ppc64le && linux // +build ppc64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -92,6 +93,9 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x40084803 + HIDIOCGRDESC = 0x50044802 + HIDIOCGRDESCSIZE = 0x40044801 HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -547,6 +551,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4000 XTABS = 0xc00 + _HIDIOCGRAWNAME = 0x40804804 + _HIDIOCGRAWPHYS = 0x40404805 + _HIDIOCGRAWUNIQ = 0x40404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index e861d97f1c0..7cc98f09c3e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build riscv64 && linux // +build riscv64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -92,6 +93,9 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x80084803 + HIDIOCGRDESC = 0x90044802 + HIDIOCGRDESCSIZE = 0x80044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -472,6 +476,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x80804804 + _HIDIOCGRAWPHYS = 0x80404805 + _HIDIOCGRAWUNIQ = 0x80404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index d39278bee59..a508392d258 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include -fsigned-char // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build s390x && linux // +build s390x,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -92,6 +93,9 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x80084803 + HIDIOCGRDESC = 0x90044802 + HIDIOCGRDESCSIZE = 0x80044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -545,6 +549,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x80804804 + _HIDIOCGRAWPHYS = 0x80404805 + _HIDIOCGRAWUNIQ = 0x80404808 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 6a742fa7ee3..d5e2dc94faa 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -1,6 +1,7 @@ // mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build sparc64 && linux // +build sparc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -96,6 +97,9 @@ const ( F_SETOWN = 0x6 F_UNLCK = 0x3 F_WRLCK = 0x2 + HIDIOCGRAWINFO = 0x40084803 + HIDIOCGRDESC = 0x50044802 + HIDIOCGRDESCSIZE = 0x40044801 HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -535,6 +539,9 @@ const ( WORDSIZE = 0x40 XCASE = 0x4 XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x40804804 + _HIDIOCGRAWPHYS = 0x40404805 + _HIDIOCGRAWUNIQ = 0x40404808 __TIOCFLUSH = 0x80047410 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go index 20f3a5799fd..72f7420d20a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go @@ -1,6 +1,7 @@ // mkerrors.sh -m32 // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build 386 && netbsd // +build 386,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go index 90b8fcd29c5..8d4eb0c0804 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && netbsd // +build amd64,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go index c5c03993b67..9eef9749f6a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go @@ -1,6 +1,7 @@ // mkerrors.sh -marm // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && netbsd // +build arm,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go index 14dd3c1d1ee..3b62ba192c3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && netbsd // +build arm64,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index c865a10df44..593cc0feffa 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -1,6 +1,7 @@ // mkerrors.sh -m32 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && openbsd // +build 386,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index 9db6b2fb6e2..25cb6094813 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && openbsd // +build amd64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index 7072526a640..a4e4c22314e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -1,6 +1,7 @@ // mkerrors.sh // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && openbsd // +build arm,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go index ac5efbe5ac7..90de7dfc33a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && openbsd // +build arm64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
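
Aside (not part of the vendored patch): the recurring change in the zerrors_* hunks above is the addition of a `//go:build` expression alongside the legacy `// +build` comment, so that newer Go toolchains read the boolean form while older ones still evaluate the same constraint from the comma/space form. The sketch below is a hypothetical file, not one from this diff; the package name and tags only mirror the vendored files' layout, and the two constraint lines are expected to encode the same condition.

// build_constraints_example.go — illustrative only, not vendored code.
// The //go:build line uses boolean operators (&&, ||, !); the // +build line
// expresses the same constraint with commas (AND) and spaces (OR).

//go:build amd64 && openbsd
// +build amd64,openbsd

// Package unix here only mirrors the layout of the generated zerrors files.
package unix

A file guarded this way compiles only for GOOS=openbsd GOARCH=amd64 under both old and new toolchains, which is why the upstream generator emits both lines during the transition.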
diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go index a74639a46fb..f1154ff56f6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips64 && openbsd // +build mips64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go index 5312c36cc82..1afee6a0890 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -1,6 +1,7 @@ // mkerrors.sh -m64 // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && solaris // +build amd64,solaris // Code generated by cmd/cgo -godefs; DO NOT EDIT. @@ -365,6 +366,7 @@ const ( HUPCL = 0x400 IBSHIFT = 0x10 ICANON = 0x2 + ICMP6_FILTER = 0x1 ICRNL = 0x100 IEXTEN = 0x8000 IFF_ADDRCONF = 0x80000 @@ -611,6 +613,7 @@ const ( IP_RECVPKTINFO = 0x1a IP_RECVRETOPTS = 0x6 IP_RECVSLLA = 0xa + IP_RECVTOS = 0xc IP_RECVTTL = 0xb IP_RETOPTS = 0x8 IP_REUSEADDR = 0x104 @@ -703,6 +706,7 @@ const ( O_APPEND = 0x8 O_CLOEXEC = 0x800000 O_CREAT = 0x100 + O_DIRECT = 0x2000000 O_DIRECTORY = 0x1000000 O_DSYNC = 0x40 O_EXCL = 0x400 diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go new file mode 100644 index 00000000000..c8c79090387 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -0,0 +1,832 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x +// +build zos,s390x + +// Hand edited based on zerrors_linux_s390x.go +// TODO: auto-generate. 
+ +package unix + +const ( + BRKINT = 0x0001 + CLOCK_MONOTONIC = 0x1 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x3 + CS8 = 0x0030 + CSIZE = 0x0030 + ECHO = 0x00000008 + ECHONL = 0x00000001 + FD_CLOEXEC = 0x01 + FD_CLOFORK = 0x02 + FNDELAY = 0x04 + F_CLOSFD = 9 + F_CONTROL_CVT = 13 + F_DUPFD = 0 + F_DUPFD2 = 8 + F_GETFD = 1 + F_GETFL = 259 + F_GETLK = 5 + F_GETOWN = 10 + F_OK = 0x0 + F_RDLCK = 1 + F_SETFD = 2 + F_SETFL = 4 + F_SETLK = 6 + F_SETLKW = 7 + F_SETOWN = 11 + F_SETTAG = 12 + F_UNLCK = 3 + F_WRLCK = 2 + FSTYPE_ZFS = 0xe9 //"Z" + FSTYPE_HFS = 0xc8 //"H" + FSTYPE_NFS = 0xd5 //"N" + FSTYPE_TFS = 0xe3 //"T" + FSTYPE_AUTOMOUNT = 0xc1 //"A" + IP6F_MORE_FRAG = 0x0001 + IP6F_OFF_MASK = 0xfff8 + IP6F_RESERVED_MASK = 0x0006 + IP6OPT_JUMBO = 0xc2 + IP6OPT_JUMBO_LEN = 6 + IP6OPT_MUTABLE = 0x20 + IP6OPT_NSAP_ADDR = 0xc3 + IP6OPT_PAD1 = 0x00 + IP6OPT_PADN = 0x01 + IP6OPT_ROUTER_ALERT = 0x05 + IP6OPT_TUNNEL_LIMIT = 0x04 + IP6OPT_TYPE_DISCARD = 0x40 + IP6OPT_TYPE_FORCEICMP = 0x80 + IP6OPT_TYPE_ICMP = 0xc0 + IP6OPT_TYPE_SKIP = 0x00 + IP6_ALERT_AN = 0x0002 + IP6_ALERT_MLD = 0x0000 + IP6_ALERT_RSVP = 0x0001 + IPPORT_RESERVED = 1024 + IPPORT_USERRESERVED = 5000 + IPPROTO_AH = 51 + IPPROTO_DSTOPTS = 60 + IPPROTO_EGP = 8 + IPPROTO_ESP = 50 + IPPROTO_FRAGMENT = 44 + IPPROTO_GGP = 2 + IPPROTO_HOPOPTS = 0 + IPPROTO_ICMP = 1 + IPPROTO_ICMPV6 = 58 + IPPROTO_IDP = 22 + IPPROTO_IP = 0 + IPPROTO_IPV6 = 41 + IPPROTO_MAX = 256 + IPPROTO_NONE = 59 + IPPROTO_PUP = 12 + IPPROTO_RAW = 255 + IPPROTO_ROUTING = 43 + IPPROTO_TCP = 6 + IPPROTO_UDP = 17 + IPV6_ADDR_PREFERENCES = 32 + IPV6_CHECKSUM = 19 + IPV6_DONTFRAG = 29 + IPV6_DSTOPTS = 23 + IPV6_HOPLIMIT = 11 + IPV6_HOPOPTS = 22 + IPV6_JOIN_GROUP = 5 + IPV6_LEAVE_GROUP = 6 + IPV6_MULTICAST_HOPS = 9 + IPV6_MULTICAST_IF = 7 + IPV6_MULTICAST_LOOP = 4 + IPV6_NEXTHOP = 20 + IPV6_PATHMTU = 12 + IPV6_PKTINFO = 13 + IPV6_PREFER_SRC_CGA = 0x10 + IPV6_PREFER_SRC_COA = 0x02 + IPV6_PREFER_SRC_HOME = 0x01 + IPV6_PREFER_SRC_NONCGA = 0x20 + IPV6_PREFER_SRC_PUBLIC = 0x08 + IPV6_PREFER_SRC_TMP = 0x04 + IPV6_RECVDSTOPTS = 28 + IPV6_RECVHOPLIMIT = 14 + IPV6_RECVHOPOPTS = 26 + IPV6_RECVPATHMTU = 16 + IPV6_RECVPKTINFO = 15 + IPV6_RECVRTHDR = 25 + IPV6_RECVTCLASS = 31 + IPV6_RTHDR = 21 + IPV6_RTHDRDSTOPTS = 24 + IPV6_RTHDR_TYPE_0 = 0 + IPV6_TCLASS = 30 + IPV6_UNICAST_HOPS = 3 + IPV6_USE_MIN_MTU = 18 + IPV6_V6ONLY = 10 + IP_ADD_MEMBERSHIP = 5 + IP_ADD_SOURCE_MEMBERSHIP = 12 + IP_BLOCK_SOURCE = 10 + IP_DEFAULT_MULTICAST_LOOP = 1 + IP_DEFAULT_MULTICAST_TTL = 1 + IP_DROP_MEMBERSHIP = 6 + IP_DROP_SOURCE_MEMBERSHIP = 13 + IP_MAX_MEMBERSHIPS = 20 + IP_MULTICAST_IF = 7 + IP_MULTICAST_LOOP = 4 + IP_MULTICAST_TTL = 3 + IP_OPTIONS = 1 + IP_PKTINFO = 101 + IP_RECVPKTINFO = 102 + IP_TOS = 2 + IP_TTL = 3 + IP_UNBLOCK_SOURCE = 11 + ICANON = 0x0010 + ICMP6_FILTER = 0x26 + ICRNL = 0x0002 + IEXTEN = 0x0020 + IGNBRK = 0x0004 + IGNCR = 0x0008 + INLCR = 0x0020 + ISIG = 0x0040 + ISTRIP = 0x0080 + IXON = 0x0200 + IXOFF = 0x0100 + LOCK_SH = 0x1 // Not exist on zOS + LOCK_EX = 0x2 // Not exist on zOS + LOCK_NB = 0x4 // Not exist on zOS + LOCK_UN = 0x8 // Not exist on zOS + POLLIN = 0x0003 + POLLOUT = 0x0004 + POLLPRI = 0x0010 + POLLERR = 0x0020 + POLLHUP = 0x0040 + POLLNVAL = 0x0080 + PROT_READ = 0x1 // mmap - page can be read + PROT_WRITE = 0x2 // page can be written + PROT_NONE = 0x4 // can't be accessed + PROT_EXEC = 0x8 // can be executed + MAP_PRIVATE = 0x1 // changes are private + MAP_SHARED = 0x2 // changes are shared + MAP_FIXED = 0x4 // place exactly + MS_SYNC = 
0x1 // msync - synchronous writes + MS_ASYNC = 0x2 // asynchronous writes + MS_INVALIDATE = 0x4 // invalidate mappings + MTM_RDONLY = 0x80000000 + MTM_RDWR = 0x40000000 + MTM_UMOUNT = 0x10000000 + MTM_IMMED = 0x08000000 + MTM_FORCE = 0x04000000 + MTM_DRAIN = 0x02000000 + MTM_RESET = 0x01000000 + MTM_SAMEMODE = 0x00100000 + MTM_UNQSEFORCE = 0x00040000 + MTM_NOSUID = 0x00000400 + MTM_SYNCHONLY = 0x00000200 + MTM_REMOUNT = 0x00000100 + MTM_NOSECURITY = 0x00000080 + O_ACCMODE = 0x03 + O_APPEND = 0x08 + O_ASYNCSIG = 0x0200 + O_CREAT = 0x80 + O_EXCL = 0x40 + O_GETFL = 0x0F + O_LARGEFILE = 0x0400 + O_NONBLOCK = 0x04 + O_RDONLY = 0x02 + O_RDWR = 0x03 + O_SYNC = 0x0100 + O_TRUNC = 0x10 + O_WRONLY = 0x01 + O_NOCTTY = 0x20 + OPOST = 0x0001 + ONLCR = 0x0004 + PARENB = 0x0200 + PARMRK = 0x0400 + QUERYCVT = 3 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 // RUSAGE_THREAD unsupported on z/OS + SEEK_CUR = 1 + SEEK_END = 2 + SEEK_SET = 0 + SETAUTOCVTALL = 5 + SETAUTOCVTON = 2 + SETCVTALL = 4 + SETCVTOFF = 0 + SETCVTON = 1 + AF_APPLETALK = 16 + AF_CCITT = 10 + AF_CHAOS = 5 + AF_DATAKIT = 9 + AF_DLI = 13 + AF_ECMA = 8 + AF_HYLINK = 15 + AF_IMPLINK = 3 + AF_INET = 2 + AF_INET6 = 19 + AF_INTF = 20 + AF_IUCV = 17 + AF_LAT = 14 + AF_LINK = 18 + AF_MAX = 30 + AF_NBS = 7 + AF_NDD = 23 + AF_NETWARE = 22 + AF_NS = 6 + AF_PUP = 4 + AF_RIF = 21 + AF_ROUTE = 20 + AF_SNA = 11 + AF_UNIX = 1 + AF_UNSPEC = 0 + IBMTCP_IMAGE = 1 + MSG_ACK_EXPECTED = 0x10 + MSG_ACK_GEN = 0x40 + MSG_ACK_TIMEOUT = 0x20 + MSG_CONNTERM = 0x80 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_EOF = 0x8000 + MSG_EOR = 0x8 + MSG_MAXIOVLEN = 16 + MSG_NONBLOCK = 0x4000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + PRIO_PROCESS = 1 + PRIO_PGRP = 2 + PRIO_USER = 3 + RLIMIT_CPU = 0 + RLIMIT_FSIZE = 1 + RLIMIT_DATA = 2 + RLIMIT_STACK = 3 + RLIMIT_CORE = 4 + RLIMIT_AS = 5 + RLIMIT_NOFILE = 6 + RLIMIT_MEMLIMIT = 7 + RLIM_INFINITY = 2147483647 + SCM_RIGHTS = 0x01 + SF_CLOSE = 0x00000002 + SF_REUSE = 0x00000001 + SHUT_RD = 0 + SHUT_RDWR = 2 + SHUT_WR = 1 + SOCK_CONN_DGRAM = 6 + SOCK_DGRAM = 2 + SOCK_RAW = 3 + SOCK_RDM = 4 + SOCK_SEQPACKET = 5 + SOCK_STREAM = 1 + SOL_SOCKET = 0xffff + SOMAXCONN = 10 + SO_ACCEPTCONN = 0x0002 + SO_ACCEPTECONNABORTED = 0x0006 + SO_ACKNOW = 0x7700 + SO_BROADCAST = 0x0020 + SO_BULKMODE = 0x8000 + SO_CKSUMRECV = 0x0800 + SO_CLOSE = 0x01 + SO_CLUSTERCONNTYPE = 0x00004001 + SO_CLUSTERCONNTYPE_INTERNAL = 8 + SO_CLUSTERCONNTYPE_NOCONN = 0 + SO_CLUSTERCONNTYPE_NONE = 1 + SO_CLUSTERCONNTYPE_SAME_CLUSTER = 2 + SO_CLUSTERCONNTYPE_SAME_IMAGE = 4 + SO_DEBUG = 0x0001 + SO_DONTROUTE = 0x0010 + SO_ERROR = 0x1007 + SO_IGNOREINCOMINGPUSH = 0x1 + SO_IGNORESOURCEVIPA = 0x0002 + SO_KEEPALIVE = 0x0008 + SO_LINGER = 0x0080 + SO_NONBLOCKLOCAL = 0x8001 + SO_NOREUSEADDR = 0x1000 + SO_OOBINLINE = 0x0100 + SO_OPTACK = 0x8004 + SO_OPTMSS = 0x8003 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x0004 + SO_REUSEPORT = 0x0200 + SO_SECINFO = 0x00004002 + SO_SET = 0x0200 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TYPE = 0x1008 + SO_UNSET = 0x0400 + SO_USELOOPBACK = 0x0040 + SO_USE_IFBUFS = 0x0400 + S_ISUID = 0x0800 + S_ISGID = 0x0400 + S_ISVTX = 0x0200 + S_IRUSR = 0x0100 + S_IWUSR = 0x0080 + S_IXUSR = 0x0040 + S_IRWXU = 0x01C0 + S_IRGRP = 0x0020 + S_IWGRP = 0x0010 + S_IXGRP = 0x0008 + S_IRWXG = 0x0038 + S_IROTH = 0x0004 + S_IWOTH = 0x0002 + S_IXOTH = 0x0001 + S_IRWXO = 0x0007 + S_IREAD = S_IRUSR + S_IWRITE = S_IWUSR + S_IEXEC = S_IXUSR + S_IFDIR = 0x01000000 + S_IFCHR = 
0x02000000 + S_IFREG = 0x03000000 + S_IFFIFO = 0x04000000 + S_IFIFO = 0x04000000 + S_IFLNK = 0x05000000 + S_IFBLK = 0x06000000 + S_IFSOCK = 0x07000000 + S_IFVMEXTL = 0xFE000000 + S_IFVMEXTL_EXEC = 0x00010000 + S_IFVMEXTL_DATA = 0x00020000 + S_IFVMEXTL_MEL = 0x00030000 + S_IFEXTL = 0x00000001 + S_IFPROGCTL = 0x00000002 + S_IFAPFCTL = 0x00000004 + S_IFNOSHARE = 0x00000008 + S_IFSHARELIB = 0x00000010 + S_IFMT = 0xFF000000 + S_IFMST = 0x00FF0000 + TCP_KEEPALIVE = 0x8 + TCP_NODELAY = 0x1 + TIOCGWINSZ = 0x4008a368 + TIOCSWINSZ = 0x8008a367 + TIOCSBRK = 0x2000a77b + TIOCCBRK = 0x2000a77a + TIOCSTI = 0x8001a772 + TIOCGPGRP = 0x4004a777 // _IOR(167, 119, int) + TCSANOW = 0 + TCSETS = 0 // equivalent to TCSANOW for tcsetattr + TCSADRAIN = 1 + TCSETSW = 1 // equivalent to TCSADRAIN for tcsetattr + TCSAFLUSH = 2 + TCSETSF = 2 // equivalent to TCSAFLUSH for tcsetattr + TCGETS = 3 // not defined in ioctl.h -- zos golang only + TCIFLUSH = 0 + TCOFLUSH = 1 + TCIOFLUSH = 2 + TCOOFF = 0 + TCOON = 1 + TCIOFF = 2 + TCION = 3 + TIOCSPGRP = 0x8004a776 + TIOCNOTTY = 0x2000a771 + TIOCEXCL = 0x2000a70d + TIOCNXCL = 0x2000a70e + TIOCGETD = 0x4004a700 + TIOCSETD = 0x8004a701 + TIOCPKT = 0x8004a770 + TIOCSTOP = 0x2000a76f + TIOCSTART = 0x2000a76e + TIOCUCNTL = 0x8004a766 + TIOCREMOTE = 0x8004a769 + TIOCMGET = 0x4004a76a + TIOCMSET = 0x8004a76d + TIOCMBIC = 0x8004a76b + TIOCMBIS = 0x8004a76c + VINTR = 0 + VQUIT = 1 + VERASE = 2 + VKILL = 3 + VEOF = 4 + VEOL = 5 + VMIN = 6 + VSTART = 7 + VSTOP = 8 + VSUSP = 9 + VTIME = 10 + WCONTINUED = 0x4 + WNOHANG = 0x1 + WUNTRACED = 0x2 + _BPX_SWAP = 1 + _BPX_NONSWAP = 2 + MCL_CURRENT = 1 // for Linux compatibility -- no zos semantics + MCL_FUTURE = 2 // for Linux compatibility -- no zos semantics + MCL_ONFAULT = 3 // for Linux compatibility -- no zos semantics + MADV_NORMAL = 0 // for Linux compatibility -- no zos semantics + MADV_RANDOM = 1 // for Linux compatibility -- no zos semantics + MADV_SEQUENTIAL = 2 // for Linux compatibility -- no zos semantics + MADV_WILLNEED = 3 // for Linux compatibility -- no zos semantics + MADV_REMOVE = 4 // for Linux compatibility -- no zos semantics + MADV_DONTFORK = 5 // for Linux compatibility -- no zos semantics + MADV_DOFORK = 6 // for Linux compatibility -- no zos semantics + MADV_HWPOISON = 7 // for Linux compatibility -- no zos semantics + MADV_MERGEABLE = 8 // for Linux compatibility -- no zos semantics + MADV_UNMERGEABLE = 9 // for Linux compatibility -- no zos semantics + MADV_SOFT_OFFLINE = 10 // for Linux compatibility -- no zos semantics + MADV_HUGEPAGE = 11 // for Linux compatibility -- no zos semantics + MADV_NOHUGEPAGE = 12 // for Linux compatibility -- no zos semantics + MADV_DONTDUMP = 13 // for Linux compatibility -- no zos semantics + MADV_DODUMP = 14 // for Linux compatibility -- no zos semantics + MADV_FREE = 15 // for Linux compatibility -- no zos semantics + MADV_WIPEONFORK = 16 // for Linux compatibility -- no zos semantics + MADV_KEEPONFORK = 17 // for Linux compatibility -- no zos semantics + AT_SYMLINK_NOFOLLOW = 1 // for Unix compatibility -- no zos semantics + AT_FDCWD = 2 // for Unix compatibility -- no zos semantics +) + +const ( + EDOM = Errno(1) + ERANGE = Errno(2) + EACCES = Errno(111) + EAGAIN = Errno(112) + EBADF = Errno(113) + EBUSY = Errno(114) + ECHILD = Errno(115) + EDEADLK = Errno(116) + EEXIST = Errno(117) + EFAULT = Errno(118) + EFBIG = Errno(119) + EINTR = Errno(120) + EINVAL = Errno(121) + EIO = Errno(122) + EISDIR = Errno(123) + EMFILE = Errno(124) + EMLINK = Errno(125) + ENAMETOOLONG = Errno(126) + 
ENFILE = Errno(127) + ENODEV = Errno(128) + ENOENT = Errno(129) + ENOEXEC = Errno(130) + ENOLCK = Errno(131) + ENOMEM = Errno(132) + ENOSPC = Errno(133) + ENOSYS = Errno(134) + ENOTDIR = Errno(135) + ENOTEMPTY = Errno(136) + ENOTTY = Errno(137) + ENXIO = Errno(138) + EPERM = Errno(139) + EPIPE = Errno(140) + EROFS = Errno(141) + ESPIPE = Errno(142) + ESRCH = Errno(143) + EXDEV = Errno(144) + E2BIG = Errno(145) + ELOOP = Errno(146) + EILSEQ = Errno(147) + ENODATA = Errno(148) + EOVERFLOW = Errno(149) + EMVSNOTUP = Errno(150) + ECMSSTORAGE = Errno(151) + EMVSDYNALC = Errno(151) + EMVSCVAF = Errno(152) + EMVSCATLG = Errno(153) + ECMSINITIAL = Errno(156) + EMVSINITIAL = Errno(156) + ECMSERR = Errno(157) + EMVSERR = Errno(157) + EMVSPARM = Errno(158) + ECMSPFSFILE = Errno(159) + EMVSPFSFILE = Errno(159) + EMVSBADCHAR = Errno(160) + ECMSPFSPERM = Errno(162) + EMVSPFSPERM = Errno(162) + EMVSSAFEXTRERR = Errno(163) + EMVSSAF2ERR = Errno(164) + EMVSTODNOTSET = Errno(165) + EMVSPATHOPTS = Errno(166) + EMVSNORTL = Errno(167) + EMVSEXPIRE = Errno(168) + EMVSPASSWORD = Errno(169) + EMVSWLMERROR = Errno(170) + EMVSCPLERROR = Errno(171) + EMVSARMERROR = Errno(172) + ELENOFORK = Errno(200) + ELEMSGERR = Errno(201) + EFPMASKINV = Errno(202) + EFPMODEINV = Errno(203) + EBUFLEN = Errno(227) + EEXTLINK = Errno(228) + ENODD = Errno(229) + ECMSESMERR = Errno(230) + ECPERR = Errno(231) + ELEMULTITHREAD = Errno(232) + ELEFENCE = Errno(244) + EBADDATA = Errno(245) + EUNKNOWN = Errno(246) + ENOTSUP = Errno(247) + EBADNAME = Errno(248) + ENOTSAFE = Errno(249) + ELEMULTITHREADFORK = Errno(257) + ECUNNOENV = Errno(258) + ECUNNOCONV = Errno(259) + ECUNNOTALIGNED = Errno(260) + ECUNERR = Errno(262) + EIBMBADCALL = Errno(1000) + EIBMBADPARM = Errno(1001) + EIBMSOCKOUTOFRANGE = Errno(1002) + EIBMSOCKINUSE = Errno(1003) + EIBMIUCVERR = Errno(1004) + EOFFLOADboxERROR = Errno(1005) + EOFFLOADboxRESTART = Errno(1006) + EOFFLOADboxDOWN = Errno(1007) + EIBMCONFLICT = Errno(1008) + EIBMCANCELLED = Errno(1009) + EIBMBADTCPNAME = Errno(1011) + ENOTBLK = Errno(1100) + ETXTBSY = Errno(1101) + EWOULDBLOCK = Errno(1102) + EINPROGRESS = Errno(1103) + EALREADY = Errno(1104) + ENOTSOCK = Errno(1105) + EDESTADDRREQ = Errno(1106) + EMSGSIZE = Errno(1107) + EPROTOTYPE = Errno(1108) + ENOPROTOOPT = Errno(1109) + EPROTONOSUPPORT = Errno(1110) + ESOCKTNOSUPPORT = Errno(1111) + EOPNOTSUPP = Errno(1112) + EPFNOSUPPORT = Errno(1113) + EAFNOSUPPORT = Errno(1114) + EADDRINUSE = Errno(1115) + EADDRNOTAVAIL = Errno(1116) + ENETDOWN = Errno(1117) + ENETUNREACH = Errno(1118) + ENETRESET = Errno(1119) + ECONNABORTED = Errno(1120) + ECONNRESET = Errno(1121) + ENOBUFS = Errno(1122) + EISCONN = Errno(1123) + ENOTCONN = Errno(1124) + ESHUTDOWN = Errno(1125) + ETOOMANYREFS = Errno(1126) + ETIMEDOUT = Errno(1127) + ECONNREFUSED = Errno(1128) + EHOSTDOWN = Errno(1129) + EHOSTUNREACH = Errno(1130) + EPROCLIM = Errno(1131) + EUSERS = Errno(1132) + EDQUOT = Errno(1133) + ESTALE = Errno(1134) + EREMOTE = Errno(1135) + ENOSTR = Errno(1136) + ETIME = Errno(1137) + ENOSR = Errno(1138) + ENOMSG = Errno(1139) + EBADMSG = Errno(1140) + EIDRM = Errno(1141) + ENONET = Errno(1142) + ERREMOTE = Errno(1143) + ENOLINK = Errno(1144) + EADV = Errno(1145) + ESRMNT = Errno(1146) + ECOMM = Errno(1147) + EPROTO = Errno(1148) + EMULTIHOP = Errno(1149) + EDOTDOT = Errno(1150) + EREMCHG = Errno(1151) + ECANCELED = Errno(1152) + EINTRNODATA = Errno(1159) + ENOREUSE = Errno(1160) + ENOMOVE = Errno(1161) +) + +// Signals +const ( + SIGHUP = Signal(1) + SIGINT = Signal(2) + SIGABRT = 
Signal(3) + SIGILL = Signal(4) + SIGPOLL = Signal(5) + SIGURG = Signal(6) + SIGSTOP = Signal(7) + SIGFPE = Signal(8) + SIGKILL = Signal(9) + SIGBUS = Signal(10) + SIGSEGV = Signal(11) + SIGSYS = Signal(12) + SIGPIPE = Signal(13) + SIGALRM = Signal(14) + SIGTERM = Signal(15) + SIGUSR1 = Signal(16) + SIGUSR2 = Signal(17) + SIGABND = Signal(18) + SIGCONT = Signal(19) + SIGCHLD = Signal(20) + SIGTTIN = Signal(21) + SIGTTOU = Signal(22) + SIGIO = Signal(23) + SIGQUIT = Signal(24) + SIGTSTP = Signal(25) + SIGTRAP = Signal(26) + SIGIOERR = Signal(27) + SIGWINCH = Signal(28) + SIGXCPU = Signal(29) + SIGXFSZ = Signal(30) + SIGVTALRM = Signal(31) + SIGPROF = Signal(32) + SIGDANGER = Signal(33) + SIGTHSTOP = Signal(34) + SIGTHCONT = Signal(35) + SIGTRACE = Signal(37) + SIGDCE = Signal(38) + SIGDUMP = Signal(39) +) + +// Error table +var errorList = [...]struct { + num Errno + name string + desc string +}{ + {1, "EDC5001I", "A domain error occurred."}, + {2, "EDC5002I", "A range error occurred."}, + {111, "EDC5111I", "Permission denied."}, + {112, "EDC5112I", "Resource temporarily unavailable."}, + {113, "EDC5113I", "Bad file descriptor."}, + {114, "EDC5114I", "Resource busy."}, + {115, "EDC5115I", "No child processes."}, + {116, "EDC5116I", "Resource deadlock avoided."}, + {117, "EDC5117I", "File exists."}, + {118, "EDC5118I", "Incorrect address."}, + {119, "EDC5119I", "File too large."}, + {120, "EDC5120I", "Interrupted function call."}, + {121, "EDC5121I", "Invalid argument."}, + {122, "EDC5122I", "Input/output error."}, + {123, "EDC5123I", "Is a directory."}, + {124, "EDC5124I", "Too many open files."}, + {125, "EDC5125I", "Too many links."}, + {126, "EDC5126I", "Filename too long."}, + {127, "EDC5127I", "Too many open files in system."}, + {128, "EDC5128I", "No such device."}, + {129, "EDC5129I", "No such file or directory."}, + {130, "EDC5130I", "Exec format error."}, + {131, "EDC5131I", "No locks available."}, + {132, "EDC5132I", "Not enough memory."}, + {133, "EDC5133I", "No space left on device."}, + {134, "EDC5134I", "Function not implemented."}, + {135, "EDC5135I", "Not a directory."}, + {136, "EDC5136I", "Directory not empty."}, + {137, "EDC5137I", "Inappropriate I/O control operation."}, + {138, "EDC5138I", "No such device or address."}, + {139, "EDC5139I", "Operation not permitted."}, + {140, "EDC5140I", "Broken pipe."}, + {141, "EDC5141I", "Read-only file system."}, + {142, "EDC5142I", "Invalid seek."}, + {143, "EDC5143I", "No such process."}, + {144, "EDC5144I", "Improper link."}, + {145, "EDC5145I", "The parameter list is too long, or the message to receive was too large for the buffer."}, + {146, "EDC5146I", "Too many levels of symbolic links."}, + {147, "EDC5147I", "Illegal byte sequence."}, + {148, "", ""}, + {149, "EDC5149I", "Value Overflow Error."}, + {150, "EDC5150I", "UNIX System Services is not active."}, + {151, "EDC5151I", "Dynamic allocation error."}, + {152, "EDC5152I", "Common VTOC access facility (CVAF) error."}, + {153, "EDC5153I", "Catalog obtain error."}, + {156, "EDC5156I", "Process initialization error."}, + {157, "EDC5157I", "An internal error has occurred."}, + {158, "EDC5158I", "Bad parameters were passed to the service."}, + {159, "EDC5159I", "The Physical File System encountered a permanent file error."}, + {160, "EDC5160I", "Bad character in environment variable name."}, + {162, "EDC5162I", "The Physical File System encountered a system error."}, + {163, "EDC5163I", "SAF/RACF extract error."}, + {164, "EDC5164I", "SAF/RACF error."}, + {165, "EDC5165I", 
"System TOD clock not set."}, + {166, "EDC5166I", "Access mode argument on function call conflicts with PATHOPTS parameter on JCL DD statement."}, + {167, "EDC5167I", "Access to the UNIX System Services version of the C RTL is denied."}, + {168, "EDC5168I", "Password has expired."}, + {169, "EDC5169I", "Password is invalid."}, + {170, "EDC5170I", "An error was encountered with WLM."}, + {171, "EDC5171I", "An error was encountered with CPL."}, + {172, "EDC5172I", "An error was encountered with Application Response Measurement (ARM) component."}, + {200, "EDC5200I", "The application contains a Language Environment member language that cannot tolerate a fork()."}, + {201, "EDC5201I", "The Language Environment message file was not found in the hierarchical file system."}, + {202, "EDC5202E", "DLL facilities are not supported under SPC environment."}, + {203, "EDC5203E", "DLL facilities are not supported under POSIX environment."}, + {227, "EDC5227I", "Buffer is not long enough to contain a path definition"}, + {228, "EDC5228I", "The file referred to is an external link"}, + {229, "EDC5229I", "No path definition for ddname in effect"}, + {230, "EDC5230I", "ESM error."}, + {231, "EDC5231I", "CP or the external security manager had an error"}, + {232, "EDC5232I", "The function failed because it was invoked from a multithread environment."}, + {244, "EDC5244I", "The program, module or DLL is not supported in this environment."}, + {245, "EDC5245I", "Data is not valid."}, + {246, "EDC5246I", "Unknown system state."}, + {247, "EDC5247I", "Operation not supported."}, + {248, "EDC5248I", "The object name specified is not correct."}, + {249, "EDC5249I", "The function is not allowed."}, + {257, "EDC5257I", "Function cannot be called in the child process of a fork() from a multithreaded process until exec() is called."}, + {258, "EDC5258I", "A CUN_RS_NO_UNI_ENV error was issued by Unicode Services."}, + {259, "EDC5259I", "A CUN_RS_NO_CONVERSION error was issued by Unicode Services."}, + {260, "EDC5260I", "A CUN_RS_TABLE_NOT_ALIGNED error was issued by Unicode Services."}, + {262, "EDC5262I", "An iconv() function encountered an unexpected error while using Unicode Services."}, + {1000, "EDC8000I", "A bad socket-call constant was found in the IUCV header."}, + {1001, "EDC8001I", "An error was found in the IUCV header."}, + {1002, "EDC8002I", "A socket descriptor is out of range."}, + {1003, "EDC8003I", "A socket descriptor is in use."}, + {1004, "EDC8004I", "Request failed because of an IUCV error."}, + {1005, "EDC8005I", "Offload box error."}, + {1006, "EDC8006I", "Offload box restarted."}, + {1007, "EDC8007I", "Offload box down."}, + {1008, "EDC8008I", "Already a conflicting call outstanding on socket."}, + {1009, "EDC8009I", "Request cancelled using a SOCKcallCANCEL request."}, + {1011, "EDC8011I", "A name of a PFS was specified that either is not configured or is not a Sockets PFS."}, + {1100, "EDC8100I", "Block device required."}, + {1101, "EDC8101I", "Text file busy."}, + {1102, "EDC8102I", "Operation would block."}, + {1103, "EDC8103I", "Operation now in progress."}, + {1104, "EDC8104I", "Connection already in progress."}, + {1105, "EDC8105I", "Socket operation on non-socket."}, + {1106, "EDC8106I", "Destination address required."}, + {1107, "EDC8107I", "Message too long."}, + {1108, "EDC8108I", "Protocol wrong type for socket."}, + {1109, "EDC8109I", "Protocol not available."}, + {1110, "EDC8110I", "Protocol not supported."}, + {1111, "EDC8111I", "Socket type not supported."}, + {1112, "EDC8112I", 
"Operation not supported on socket."}, + {1113, "EDC8113I", "Protocol family not supported."}, + {1114, "EDC8114I", "Address family not supported."}, + {1115, "EDC8115I", "Address already in use."}, + {1116, "EDC8116I", "Address not available."}, + {1117, "EDC8117I", "Network is down."}, + {1118, "EDC8118I", "Network is unreachable."}, + {1119, "EDC8119I", "Network dropped connection on reset."}, + {1120, "EDC8120I", "Connection ended abnormally."}, + {1121, "EDC8121I", "Connection reset."}, + {1122, "EDC8122I", "No buffer space available."}, + {1123, "EDC8123I", "Socket already connected."}, + {1124, "EDC8124I", "Socket not connected."}, + {1125, "EDC8125I", "Can't send after socket shutdown."}, + {1126, "EDC8126I", "Too many references; can't splice."}, + {1127, "EDC8127I", "Connection timed out."}, + {1128, "EDC8128I", "Connection refused."}, + {1129, "EDC8129I", "Host is not available."}, + {1130, "EDC8130I", "Host cannot be reached."}, + {1131, "EDC8131I", "Too many processes."}, + {1132, "EDC8132I", "Too many users."}, + {1133, "EDC8133I", "Disk quota exceeded."}, + {1134, "EDC8134I", "Stale file handle."}, + {1135, "", ""}, + {1136, "EDC8136I", "File is not a STREAM."}, + {1137, "EDC8137I", "STREAMS ioctl() timeout."}, + {1138, "EDC8138I", "No STREAMS resources."}, + {1139, "EDC8139I", "The message identified by set_id and msg_id is not in the message catalog."}, + {1140, "EDC8140I", "Bad message."}, + {1141, "EDC8141I", "Identifier removed."}, + {1142, "", ""}, + {1143, "", ""}, + {1144, "EDC8144I", "The link has been severed."}, + {1145, "", ""}, + {1146, "", ""}, + {1147, "", ""}, + {1148, "EDC8148I", "Protocol error."}, + {1149, "EDC8149I", "Multihop not allowed."}, + {1150, "", ""}, + {1151, "", ""}, + {1152, "EDC8152I", "The asynchronous I/O request has been canceled."}, + {1159, "EDC8159I", "Function call was interrupted before any data was received."}, + {1160, "EDC8160I", "Socket reuse is not supported."}, + {1161, "EDC8161I", "The file system cannot currently be moved."}, +} + +// Signal table +var signalList = [...]struct { + num Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGABT", "aborted"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGPOLL", "pollable event"}, + {6, "SIGURG", "urgent I/O condition"}, + {7, "SIGSTOP", "stop process"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad argument to routine"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGUSR1", "user defined signal 1"}, + {17, "SIGUSR2", "user defined signal 2"}, + {18, "SIGABND", "abend"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGQUIT", "quit"}, + {25, "SIGTSTP", "stopped"}, + {26, "SIGTRAP", "trace/breakpoint trap"}, + {27, "SIGIOER", "I/O error"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGXCPU", "CPU time limit exceeded"}, + {30, "SIGXFSZ", "file size limit exceeded"}, + {31, "SIGVTALRM", "virtual timer expired"}, + {32, "SIGPROF", "profiling timer expired"}, + {33, "SIGDANGER", "danger"}, + {34, "SIGTHSTOP", "stop thread"}, + {35, "SIGTHCONT", "continue thread"}, + {37, "SIGTRACE", "trace"}, + {38, "", "DCE"}, + {39, "SIGDUMP", "dump"}, +} diff --git 
a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go index 89c5920e0cb..bd001a6e1cc 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go @@ -1,5 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("arm", "arm64"). DO NOT EDIT. +//go:build linux && (arm || arm64) // +build linux // +build arm arm64 diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go index 24b841eec50..c34d0639be3 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go @@ -1,5 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("mips", "mips64"). DO NOT EDIT. +//go:build linux && (mips || mips64) // +build linux // +build mips mips64 diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go index 47b04895651..3ccf0c0c4a8 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go @@ -1,5 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("mipsle", "mips64le"). DO NOT EDIT. +//go:build linux && (mipsle || mips64le) // +build linux // +build mipsle mips64le diff --git a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go index ea5d9cb536c..7d65857004c 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go @@ -1,5 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("386", "amd64"). DO NOT EDIT. +//go:build linux && (386 || amd64) // +build linux // +build 386 amd64 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index ed657ff1bc0..91a23cc7287 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -1,6 +1,7 @@ // go run mksyscall_aix_ppc.go -aix -tags aix,ppc syscall_aix.go syscall_aix_ppc.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build aix && ppc // +build aix,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index 664b293b431..33c2609b8b4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -1,6 +1,7 @@ // go run mksyscall_aix_ppc64.go -aix -tags aix,ppc64 syscall_aix.go syscall_aix_ppc64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build aix && ppc64 // +build aix,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index 0550da06d14..8b737fa971e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -1,8 +1,8 @@ // go run mksyscall_aix_ppc64.go -aix -tags aix,ppc64 syscall_aix.go syscall_aix_ppc64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
-// +build aix,ppc64 -// +build gc +//go:build aix && ppc64 && gc +// +build aix,ppc64,gc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go index cde4dbc5f54..3c260917ed5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go @@ -1,8 +1,8 @@ // go run mksyscall_aix_ppc64.go -aix -tags aix,ppc64 syscall_aix.go syscall_aix_ppc64.go // Code generated by the command above; see README.md. DO NOT EDIT. -// +build aix,ppc64 -// +build gccgo +//go:build aix && ppc64 && gccgo +// +build aix,ppc64,gccgo package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go index c8c142c59a0..48a62e39062 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -tags darwin,386,go1.13 syscall_darwin.1_13.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build darwin && 386 && go1.13 // +build darwin,386,go1.13 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go index 3877183483f..a266636af67 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -tags darwin,386,go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_386.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build darwin && 386 && go1.12 // +build darwin,386,go1.12 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go index 88826236136..e36299ead09 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags darwin,amd64,go1.13 syscall_darwin.1_13.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build darwin && amd64 && go1.13 // +build darwin,amd64,go1.13 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 508e5639bf4..f4111628823 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags darwin,amd64,go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build darwin && amd64 && go1.12 // +build darwin,amd64,go1.12 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go index de4738fff80..ed437f89a9e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -tags darwin,arm,go1.13 syscall_darwin.1_13.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build darwin && arm && go1.13 // +build darwin,arm,go1.13 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go index c0c771f4025..7f88cb5ea22 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -tags darwin,arm,go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build darwin && arm && go1.12 // +build darwin,arm,go1.12 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go index 870eb37abf5..d30ec4e29a0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags darwin,arm64,go1.13 syscall_darwin.1_13.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build darwin && arm64 && go1.13 // +build darwin,arm64,go1.13 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 9b01a79c41e..a10df58d00e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags darwin,arm64,go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build darwin && arm64 && go1.12 // +build darwin,arm64,go1.12 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 1aaccd3615e..1b6eedfa611 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -dragonfly -tags dragonfly,amd64 syscall_bsd.go syscall_dragonfly.go syscall_dragonfly_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build dragonfly && amd64 // +build dragonfly,amd64 package unix @@ -362,8 +363,10 @@ func pipe() (r int, w int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) +func pipe2(p *[2]_C_int, flags int) (r int, w int, err error) { + r0, r1, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + r = int(r0) + w = int(r1) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index 600f1d26d21..3e9bddb7b22 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -tags freebsd,386 syscall_bsd.go syscall_freebsd.go syscall_freebsd_386.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build freebsd && 386 // +build freebsd,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 064934b0d13..c72a462b91e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags freebsd,amd64 syscall_bsd.go syscall_freebsd.go syscall_freebsd_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build freebsd && amd64 // +build freebsd,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 31d2c461657..530d5df90c0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -arm -tags freebsd,arm syscall_bsd.go syscall_freebsd.go syscall_freebsd_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build freebsd && arm // +build freebsd,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index 4adaaa56183..71e7df9e855 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags freebsd,arm64 syscall_bsd.go syscall_freebsd.go syscall_freebsd_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build freebsd && arm64 // +build freebsd,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go index 665dd9e4b49..af5cb064ec4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall_solaris.go -illumos -tags illumos,amd64 syscall_illumos.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build illumos && amd64 // +build illumos,amd64 package unix @@ -14,19 +15,25 @@ import ( //go:cgo_import_dynamic libc_writev writev "libc.so" //go:cgo_import_dynamic libc_pwritev pwritev "libc.so" //go:cgo_import_dynamic libc_accept4 accept4 "libsocket.so" +//go:cgo_import_dynamic libc_putmsg putmsg "libc.so" +//go:cgo_import_dynamic libc_getmsg getmsg "libc.so" //go:linkname procreadv libc_readv //go:linkname procpreadv libc_preadv //go:linkname procwritev libc_writev //go:linkname procpwritev libc_pwritev //go:linkname procaccept4 libc_accept4 +//go:linkname procputmsg libc_putmsg +//go:linkname procgetmsg libc_getmsg var ( procreadv, procpreadv, procwritev, procpwritev, - procaccept4 syscallFunc + procaccept4, + procputmsg, + procgetmsg syscallFunc ) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -99,3 +106,23 @@ func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procputmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(flags), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(unsafe.Pointer(flags)), 0, 0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 2fbbbe5a898..7305cc915b7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1,5 +1,6 @@ // Code generated by mkmerge.go; DO NOT EDIT. +//go:build linux // +build linux package unix @@ -531,6 +532,16 @@ func Close(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func CloseRange(first uint, last uint, flags uint) (err error) { + _, _, e1 := Syscall(SYS_CLOSE_RANGE, uintptr(first), uintptr(last), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index 19ebd3ff75f..e37096e4dec 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -tags linux,386 syscall_linux.go syscall_linux_386.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build linux && 386 // +build linux,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 5c562182a19..9919d8486d4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,amd64 syscall_linux.go syscall_linux_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && amd64 // +build linux,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index dc69d99c612..076754d48d1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -arm -tags linux,arm syscall_linux.go syscall_linux_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && arm // +build linux,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 1b897dee05d..e893f987f91 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,arm64 syscall_linux.go syscall_linux_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && arm64 // +build linux,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 49186843ae5..4703cf3c338 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -1,6 +1,7 @@ // go run mksyscall.go -b32 -arm -tags linux,mips syscall_linux.go syscall_linux_mipsx.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && mips // +build linux,mips package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 9171d3bd2a6..a134f9a4d2e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,mips64 syscall_linux.go syscall_linux_mips64x.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && mips64 // +build linux,mips64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index 82286f04f9a..b1fff2d946a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,mips64le syscall_linux.go syscall_linux_mips64x.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && mips64le // +build linux,mips64le package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 15920621c47..d13d6da01ef 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -arm -tags linux,mipsle syscall_linux.go syscall_linux_mipsx.go // Code generated by the command above; see README.md. 
DO NOT EDIT. +//go:build linux && mipsle // +build linux,mipsle package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 73a42e2ccba..da8ec039666 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,ppc64 syscall_linux.go syscall_linux_ppc64x.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && ppc64 // +build linux,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 6b85595366a..083f493bb6f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,ppc64le syscall_linux.go syscall_linux_ppc64x.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && ppc64le // +build linux,ppc64le package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index b76133447e8..63b393b8027 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,riscv64 syscall_linux.go syscall_linux_riscv64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && riscv64 // +build linux,riscv64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index d7032ab1e4a..bb347407d3d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,s390x syscall_linux.go syscall_linux_s390x.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && s390x // +build linux,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index bcbbdd906e8..8edc517e1e6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -tags linux,sparc64 syscall_linux.go syscall_linux_sparc64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build linux && sparc64 // +build linux,sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 3bbd9e39cda..4726ab30a8f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -netbsd -tags netbsd,386 syscall_bsd.go syscall_netbsd.go syscall_netbsd_386.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build netbsd && 386 // +build netbsd,386 package unix @@ -362,6 +363,16 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index d8cf5012c27..fe71456dbc0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -netbsd -tags netbsd,amd64 syscall_bsd.go syscall_netbsd.go syscall_netbsd_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build netbsd && amd64 // +build netbsd,amd64 package unix @@ -362,6 +363,16 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 1153fe69b8e..0b5b2f0143b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -netbsd -arm -tags netbsd,arm syscall_bsd.go syscall_netbsd.go syscall_netbsd_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build netbsd && arm // +build netbsd,arm package unix @@ -362,6 +363,16 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 24b4ebb41fa..bfca28648fb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -netbsd -tags netbsd,arm64 syscall_bsd.go syscall_netbsd.go syscall_netbsd_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build netbsd && arm64 // +build netbsd,arm64 package unix @@ -362,6 +363,16 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index b44b31aeb16..8f80f4ade51 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -openbsd -tags openbsd,386 syscall_bsd.go syscall_openbsd.go syscall_openbsd_386.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build openbsd && 386 // +build openbsd,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 67f93ee76d8..3a47aca7bf7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -openbsd -tags openbsd,amd64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build openbsd && amd64 // +build openbsd,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index d7c878b1d0a..883a9b45e8e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -1,6 +1,7 @@ // go run mksyscall.go -l32 -openbsd -arm -tags openbsd,arm syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build openbsd && arm // +build openbsd,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 8facd695d5a..aac7fdc95e2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -openbsd -tags openbsd,arm64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build openbsd && arm64 // +build openbsd,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index ec6bd5bb73a..8776187462b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -1,6 +1,7 @@ // go run mksyscall.go -openbsd -tags openbsd,mips64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_mips64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build openbsd && mips64 // +build openbsd,mips64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 9898ce1f477..4e18d5c99fd 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -1,6 +1,7 @@ // go run mksyscall_solaris.go -tags solaris,amd64 syscall_solaris.go syscall_solaris_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build solaris && amd64 // +build solaris,amd64 package unix @@ -618,8 +619,9 @@ func __minor(version int, dev uint64) (val uint) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) +func ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) + ret = int(r0) if e1 != 0 { err = e1 } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go new file mode 100644 index 00000000000..8285ab8419e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -0,0 +1,1217 @@ +// go run mksyscall.go -tags zos,s390x syscall_zos_s390x.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build zos && s390x +// +build zos,s390x + +package unix + +import ( + "unsafe" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := syscall_syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := syscall_syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := syscall_syscall(SYS___ACCEPT_A, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := syscall_syscall(SYS___BIND_A, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 
!= 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := syscall_syscall(SYS___CONNECT_A, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := syscall_rawsyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := syscall_syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := syscall_syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := syscall_rawsyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := syscall_rawsyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := syscall_rawsyscall(SYS___GETPEERNAME_A, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := syscall_rawsyscall(SYS___GETSOCKNAME_A, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(SYS___RECVFROM_A, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, 
addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(SYS___SENDTO_A, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(SYS___RECVMSG_A, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(SYS___SENDMSG_A, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := syscall_syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := syscall_syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___ACCESS_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___CHDIR_A, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___CHOWN_A, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___CHMOD_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Creat(path string, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall_syscall(SYS___CREAT_A, uintptr(unsafe.Pointer(_p0)), 
uintptr(mode), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, _, e1 := syscall_syscall(SYS_DUP, uintptr(oldfd), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(oldfd int, newfd int) (err error) { + _, _, e1 := syscall_syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + syscall_syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := syscall_syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := syscall_syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := syscall_syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) { + r0, _, e1 := syscall_syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + retval = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstat(fd int, stat *Stat_LE_t) (err error) { + _, _, e1 := syscall_syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatvfs(fd int, stat *Statvfs_t) (err error) { + _, _, e1 := syscall_syscall(SYS_FSTATVFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := syscall_syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := syscall_syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpagesize() (pgsize int) { + r0, _, _ := syscall_syscall(SYS_GETPAGESIZE, 0, 0, 0) + pgsize = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 
:= syscall_syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Poll(fds []PollFd, timeout int) (n int, err error) { + var _p0 unsafe.Pointer + if len(fds) > 0 { + _p0 = unsafe.Pointer(&fds[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(SYS_POLL, uintptr(_p0), uintptr(len(fds)), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := syscall_syscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func W_Getmntent(buff *byte, size int) (lastsys int, err error) { + r0, _, e1 := syscall_syscall(SYS_W_GETMNTENT, uintptr(unsafe.Pointer(buff)), uintptr(size), 0) + lastsys = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mount(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(filesystem) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + var _p3 *byte + _p3, err = BytePtrFromString(parm) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(SYS___MOUNT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(mtm), uintptr(parmlen), uintptr(unsafe.Pointer(_p3))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(filesystem string, mtm int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(filesystem) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___UMOUNT_A, uintptr(unsafe.Pointer(_p0)), uintptr(mtm), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___CHROOT_A, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + _, _, e1 := syscall_rawsyscall(SYS___UNAME_A, uintptr(unsafe.Pointer(buf)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gethostname(buf []byte) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(SYS___GETHOSTNAME_A, uintptr(_p0), uintptr(len(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := syscall_rawsyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := syscall_rawsyscall(SYS_GETEUID, 0, 0, 0) + uid = 
int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := syscall_rawsyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := syscall_rawsyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := syscall_rawsyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (pid int) { + r0, _, _ := syscall_rawsyscall(SYS_GETPPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := syscall_syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getrusage(who int, rusage *rusage_zos) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := syscall_rawsyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := syscall_rawsyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig Signal) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___LCHOWN_A, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___LINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := syscall_syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lstat(path string, stat *Stat_LE_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + 
if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___LSTAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___MKDIR_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___MKFIFO_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___MKNOD_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(SYS___READLINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___RENAME_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___RMDIR_A, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (off int64, err error) 
{ + r0, _, e1 := syscall_syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + off = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := syscall_syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(resource int, lim *Rlimit) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := syscall_rawsyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := syscall_syscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(uid int) (err error) { + _, _, e1 := syscall_syscall(SYS_SETGID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := syscall_syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func stat(path string, statLE *Stat_LE_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___STAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(statLE)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___SYMLINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + syscall_syscall(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, 
_, e1 := syscall_syscall(SYS___TRUNCATE_A, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tcgetattr(fildes int, termptr *Termios) (err error) { + _, _, e1 := syscall_syscall(SYS_TCGETATTR, uintptr(fildes), uintptr(unsafe.Pointer(termptr)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tcsetattr(fildes int, when int, termptr *Termios) (err error) { + _, _, e1 := syscall_syscall(SYS_TCSETATTR, uintptr(fildes), uintptr(when), uintptr(unsafe.Pointer(termptr))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _, _ := syscall_syscall(SYS_UMASK, uintptr(mask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___UNLINK_A, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Utime(path string, utim *Utimbuf) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___UTIME_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(utim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall_syscall(SYS___OPEN_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func remove(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS_REMOVE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func waitpid(pid int, wstatus *_C_int, options int) (wpid int, err error) { + r0, _, e1 := syscall_syscall(SYS_WAITPID, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options)) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func gettimeofday(tv *timeval_zos) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]_C_int) (err error) { + _, _, e1 := syscall_rawsyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(SYS___UTIMES_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff 
--git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go index 102f1ab4750..9e9d0b2a9c4 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go @@ -1,6 +1,7 @@ // go run mksysctl_openbsd.go // Code generated by the command above; DO NOT EDIT. +//go:build 386 && openbsd // +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go index 4866fced8ae..adecd09667d 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -1,6 +1,7 @@ // go run mksysctl_openbsd.go // Code generated by the command above; DO NOT EDIT. +//go:build amd64 && openbsd // +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go index d3801eb24b3..8ea52a4a181 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go @@ -1,6 +1,7 @@ // go run mksysctl_openbsd.go // Code generated by the command above; DO NOT EDIT. +//go:build arm && openbsd // +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go index ba4304fd233..154b57ae3e2 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go @@ -1,6 +1,7 @@ // go run mksysctl_openbsd.go // Code generated by the command above; DO NOT EDIT. +//go:build arm64 && openbsd // +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go index aca34b34933..d96bb2ba4db 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go @@ -1,6 +1,7 @@ // go run mksysctl_openbsd.go // Code generated by the command above; DO NOT EDIT. +//go:build mips64 && openbsd // +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go index ad62324c7c1..1794ffc9245 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go @@ -1,6 +1,7 @@ // go run mksysnum.go /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.13.sdk/usr/include/sys/syscall.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && darwin // +build 386,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go index a2fc91d6a80..f8298ff9b58 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go @@ -1,6 +1,7 @@ // go run mksysnum.go /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.14.sdk/usr/include/sys/syscall.h // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build amd64 && darwin // +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go index 20d7808ace3..6dc736449a5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go @@ -1,6 +1,7 @@ // go run mksysnum.go /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS11.1.sdk/usr/include/sys/syscall.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && darwin // +build arm,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go index 527b9588cc9..5eb433bbf01 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go @@ -1,6 +1,7 @@ // go run mksysnum.go /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS11.1.sdk/usr/include/sys/syscall.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && darwin // +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go index 9912c6ee3d6..703675c0c4a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && dragonfly // +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go index 9474974b657..59d5dfc2092 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && freebsd // +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go index 48a7beae7bb..342d471d2eb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && freebsd // +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go index 4a6dfd4a745..e2e3d72c5b0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build arm && freebsd // +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go index 3e51af8edd2..61ad5ca3c19 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && freebsd // +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index c80f3c7bdd8..8e535971348 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -m32 /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && linux // +build 386,linux package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 2369995cec3..d7dceb769b3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -m64 /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && linux // +build amd64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 971c5af6d42..04093a69fd8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && linux // +build arm,linux package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index a5e410b24fb..48f94f135d6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -fsigned-char /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && linux // +build arm64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 438a6ea4e6b..499978c3e40 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build mips && linux // +build mips,linux package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 622472da220..10d1db2be0c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips64 && linux // +build mips64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index e029b989b80..208d5dcd5a3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips64le && linux // +build mips64le,linux package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 86a497b58d4..f8250602eb8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mipsle && linux // +build mipsle,linux package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index b6b0b5e602a..d5ed3ff5100 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build ppc64 && linux // +build ppc64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index b344db0146d..e29b4424c24 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build ppc64le && linux // +build ppc64le,linux package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 9b8fa53dccc..41deed6c3a5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build riscv64 && linux // +build riscv64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 8261f72c0ba..8e53a9e8ceb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -fsigned-char /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build s390x && linux // +build s390x,linux package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index f4bbeb3db5e..596e5bc7d35 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -1,6 +1,7 @@ // go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build sparc64 && linux // +build sparc64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go index e66a8c9d39e..3a6699eba98 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go @@ -1,6 +1,7 @@ // go run mksysnum.go http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && netbsd // +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go index 42c788f2490..5677cd4f158 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go @@ -1,6 +1,7 @@ // go run mksysnum.go http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && netbsd // +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go index 0a0757179ba..e784cb6db1c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go @@ -1,6 +1,7 @@ // go run mksysnum.go http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && netbsd // +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go index 0291c0931b4..bd4952efa5b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go @@ -1,6 +1,7 @@ // go run mksysnum.go http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; DO NOT EDIT. 
+//go:build arm64 && netbsd // +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go index b0207d1c9bb..817edbf95c0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && openbsd // +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go index f0dec6f0b43..ea453614e69 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && openbsd // +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go index 33d1dc5404e..467971eed66 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && openbsd // +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go index fe2b689b637..32eec5ed56f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && openbsd // +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go index 5c08d573b3e..a37f7737563 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go @@ -1,6 +1,7 @@ // go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips64 && openbsd // +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go new file mode 100644 index 00000000000..073daad43b7 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go @@ -0,0 +1,2670 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x +// +build zos,s390x + +package unix + +// TODO: auto-generate. 
+ +const ( + SYS_ACOSD128 = 0xB80 + SYS_ACOSD32 = 0xB7E + SYS_ACOSD64 = 0xB7F + SYS_ACOSHD128 = 0xB83 + SYS_ACOSHD32 = 0xB81 + SYS_ACOSHD64 = 0xB82 + SYS_AIO_FSYNC = 0xC69 + SYS_ASCTIME = 0x0AE + SYS_ASCTIME64 = 0xCD7 + SYS_ASCTIME64_R = 0xCD8 + SYS_ASIND128 = 0xB86 + SYS_ASIND32 = 0xB84 + SYS_ASIND64 = 0xB85 + SYS_ASINHD128 = 0xB89 + SYS_ASINHD32 = 0xB87 + SYS_ASINHD64 = 0xB88 + SYS_ATAN2D128 = 0xB8F + SYS_ATAN2D32 = 0xB8D + SYS_ATAN2D64 = 0xB8E + SYS_ATAND128 = 0xB8C + SYS_ATAND32 = 0xB8A + SYS_ATAND64 = 0xB8B + SYS_ATANHD128 = 0xB92 + SYS_ATANHD32 = 0xB90 + SYS_ATANHD64 = 0xB91 + SYS_BIND2ADDRSEL = 0xD59 + SYS_C16RTOMB = 0xD40 + SYS_C32RTOMB = 0xD41 + SYS_CBRTD128 = 0xB95 + SYS_CBRTD32 = 0xB93 + SYS_CBRTD64 = 0xB94 + SYS_CEILD128 = 0xB98 + SYS_CEILD32 = 0xB96 + SYS_CEILD64 = 0xB97 + SYS_CLEARENV = 0x0C9 + SYS_CLEARERR_UNLOCKED = 0xCA1 + SYS_CLOCK = 0x0AA + SYS_CLOGL = 0xA00 + SYS_CLRMEMF = 0x0BD + SYS_CONJ = 0xA03 + SYS_CONJF = 0xA06 + SYS_CONJL = 0xA09 + SYS_COPYSIGND128 = 0xB9E + SYS_COPYSIGND32 = 0xB9C + SYS_COPYSIGND64 = 0xB9D + SYS_COSD128 = 0xBA1 + SYS_COSD32 = 0xB9F + SYS_COSD64 = 0xBA0 + SYS_COSHD128 = 0xBA4 + SYS_COSHD32 = 0xBA2 + SYS_COSHD64 = 0xBA3 + SYS_CPOW = 0xA0C + SYS_CPOWF = 0xA0F + SYS_CPOWL = 0xA12 + SYS_CPROJ = 0xA15 + SYS_CPROJF = 0xA18 + SYS_CPROJL = 0xA1B + SYS_CREAL = 0xA1E + SYS_CREALF = 0xA21 + SYS_CREALL = 0xA24 + SYS_CSIN = 0xA27 + SYS_CSINF = 0xA2A + SYS_CSINH = 0xA30 + SYS_CSINHF = 0xA33 + SYS_CSINHL = 0xA36 + SYS_CSINL = 0xA2D + SYS_CSNAP = 0x0C5 + SYS_CSQRT = 0xA39 + SYS_CSQRTF = 0xA3C + SYS_CSQRTL = 0xA3F + SYS_CTAN = 0xA42 + SYS_CTANF = 0xA45 + SYS_CTANH = 0xA4B + SYS_CTANHF = 0xA4E + SYS_CTANHL = 0xA51 + SYS_CTANL = 0xA48 + SYS_CTIME = 0x0AB + SYS_CTIME64 = 0xCD9 + SYS_CTIME64_R = 0xCDA + SYS_CTRACE = 0x0C6 + SYS_DIFFTIME = 0x0A7 + SYS_DIFFTIME64 = 0xCDB + SYS_DLADDR = 0xC82 + SYS_DYNALLOC = 0x0C3 + SYS_DYNFREE = 0x0C2 + SYS_ERFCD128 = 0xBAA + SYS_ERFCD32 = 0xBA8 + SYS_ERFCD64 = 0xBA9 + SYS_ERFD128 = 0xBA7 + SYS_ERFD32 = 0xBA5 + SYS_ERFD64 = 0xBA6 + SYS_EXP2D128 = 0xBB0 + SYS_EXP2D32 = 0xBAE + SYS_EXP2D64 = 0xBAF + SYS_EXPD128 = 0xBAD + SYS_EXPD32 = 0xBAB + SYS_EXPD64 = 0xBAC + SYS_EXPM1D128 = 0xBB3 + SYS_EXPM1D32 = 0xBB1 + SYS_EXPM1D64 = 0xBB2 + SYS_FABSD128 = 0xBB6 + SYS_FABSD32 = 0xBB4 + SYS_FABSD64 = 0xBB5 + SYS_FDELREC_UNLOCKED = 0xCA2 + SYS_FDIMD128 = 0xBB9 + SYS_FDIMD32 = 0xBB7 + SYS_FDIMD64 = 0xBB8 + SYS_FDOPEN_UNLOCKED = 0xCFC + SYS_FECLEAREXCEPT = 0xAEA + SYS_FEGETENV = 0xAEB + SYS_FEGETEXCEPTFLAG = 0xAEC + SYS_FEGETROUND = 0xAED + SYS_FEHOLDEXCEPT = 0xAEE + SYS_FEOF_UNLOCKED = 0xCA3 + SYS_FERAISEEXCEPT = 0xAEF + SYS_FERROR_UNLOCKED = 0xCA4 + SYS_FESETENV = 0xAF0 + SYS_FESETEXCEPTFLAG = 0xAF1 + SYS_FESETROUND = 0xAF2 + SYS_FETCHEP = 0x0BF + SYS_FETESTEXCEPT = 0xAF3 + SYS_FEUPDATEENV = 0xAF4 + SYS_FE_DEC_GETROUND = 0xBBA + SYS_FE_DEC_SETROUND = 0xBBB + SYS_FFLUSH_UNLOCKED = 0xCA5 + SYS_FGETC_UNLOCKED = 0xC80 + SYS_FGETPOS64 = 0xCEE + SYS_FGETPOS64_UNLOCKED = 0xCF4 + SYS_FGETPOS_UNLOCKED = 0xCA6 + SYS_FGETS_UNLOCKED = 0xC7C + SYS_FGETWC_UNLOCKED = 0xCA7 + SYS_FGETWS_UNLOCKED = 0xCA8 + SYS_FILENO_UNLOCKED = 0xCA9 + SYS_FLDATA = 0x0C1 + SYS_FLDATA_UNLOCKED = 0xCAA + SYS_FLOCATE_UNLOCKED = 0xCAB + SYS_FLOORD128 = 0xBBE + SYS_FLOORD32 = 0xBBC + SYS_FLOORD64 = 0xBBD + SYS_FMA = 0xA63 + SYS_FMAD128 = 0xBC1 + SYS_FMAD32 = 0xBBF + SYS_FMAD64 = 0xBC0 + SYS_FMAF = 0xA66 + SYS_FMAL = 0xA69 + SYS_FMAX = 0xA6C + SYS_FMAXD128 = 0xBC4 + SYS_FMAXD32 = 0xBC2 + SYS_FMAXD64 = 0xBC3 + SYS_FMAXF = 0xA6F + SYS_FMAXL = 0xA72 + SYS_FMIN = 0xA75 + SYS_FMIND128 = 0xBC7 
+ SYS_FMIND32 = 0xBC5 + SYS_FMIND64 = 0xBC6 + SYS_FMINF = 0xA78 + SYS_FMINL = 0xA7B + SYS_FMODD128 = 0xBCA + SYS_FMODD32 = 0xBC8 + SYS_FMODD64 = 0xBC9 + SYS_FOPEN64 = 0xD49 + SYS_FOPEN64_UNLOCKED = 0xD4A + SYS_FOPEN_UNLOCKED = 0xCFA + SYS_FPRINTF_UNLOCKED = 0xCAC + SYS_FPUTC_UNLOCKED = 0xC81 + SYS_FPUTS_UNLOCKED = 0xC7E + SYS_FPUTWC_UNLOCKED = 0xCAD + SYS_FPUTWS_UNLOCKED = 0xCAE + SYS_FREAD_NOUPDATE = 0xCEC + SYS_FREAD_NOUPDATE_UNLOCKED = 0xCED + SYS_FREAD_UNLOCKED = 0xC7B + SYS_FREEIFADDRS = 0xCE6 + SYS_FREOPEN64 = 0xD4B + SYS_FREOPEN64_UNLOCKED = 0xD4C + SYS_FREOPEN_UNLOCKED = 0xCFB + SYS_FREXPD128 = 0xBCE + SYS_FREXPD32 = 0xBCC + SYS_FREXPD64 = 0xBCD + SYS_FSCANF_UNLOCKED = 0xCAF + SYS_FSEEK64 = 0xCEF + SYS_FSEEK64_UNLOCKED = 0xCF5 + SYS_FSEEKO64 = 0xCF0 + SYS_FSEEKO64_UNLOCKED = 0xCF6 + SYS_FSEEKO_UNLOCKED = 0xCB1 + SYS_FSEEK_UNLOCKED = 0xCB0 + SYS_FSETPOS64 = 0xCF1 + SYS_FSETPOS64_UNLOCKED = 0xCF7 + SYS_FSETPOS_UNLOCKED = 0xCB3 + SYS_FTELL64 = 0xCF2 + SYS_FTELL64_UNLOCKED = 0xCF8 + SYS_FTELLO64 = 0xCF3 + SYS_FTELLO64_UNLOCKED = 0xCF9 + SYS_FTELLO_UNLOCKED = 0xCB5 + SYS_FTELL_UNLOCKED = 0xCB4 + SYS_FUPDATE = 0x0B5 + SYS_FUPDATE_UNLOCKED = 0xCB7 + SYS_FWIDE_UNLOCKED = 0xCB8 + SYS_FWPRINTF_UNLOCKED = 0xCB9 + SYS_FWRITE_UNLOCKED = 0xC7A + SYS_FWSCANF_UNLOCKED = 0xCBA + SYS_GETDATE64 = 0xD4F + SYS_GETIFADDRS = 0xCE7 + SYS_GETIPV4SOURCEFILTER = 0xC77 + SYS_GETSOURCEFILTER = 0xC79 + SYS_GETSYNTX = 0x0FD + SYS_GETS_UNLOCKED = 0xC7D + SYS_GETTIMEOFDAY64 = 0xD50 + SYS_GETWCHAR_UNLOCKED = 0xCBC + SYS_GETWC_UNLOCKED = 0xCBB + SYS_GMTIME = 0x0B0 + SYS_GMTIME64 = 0xCDC + SYS_GMTIME64_R = 0xCDD + SYS_HYPOTD128 = 0xBD1 + SYS_HYPOTD32 = 0xBCF + SYS_HYPOTD64 = 0xBD0 + SYS_ILOGBD128 = 0xBD4 + SYS_ILOGBD32 = 0xBD2 + SYS_ILOGBD64 = 0xBD3 + SYS_ILOGBF = 0xA7E + SYS_ILOGBL = 0xA81 + SYS_INET6_IS_SRCADDR = 0xD5A + SYS_ISBLANK = 0x0FE + SYS_ISWALNUM = 0x0FF + SYS_LDEXPD128 = 0xBD7 + SYS_LDEXPD32 = 0xBD5 + SYS_LDEXPD64 = 0xBD6 + SYS_LGAMMAD128 = 0xBDA + SYS_LGAMMAD32 = 0xBD8 + SYS_LGAMMAD64 = 0xBD9 + SYS_LIO_LISTIO = 0xC6A + SYS_LLRINT = 0xA84 + SYS_LLRINTD128 = 0xBDD + SYS_LLRINTD32 = 0xBDB + SYS_LLRINTD64 = 0xBDC + SYS_LLRINTF = 0xA87 + SYS_LLRINTL = 0xA8A + SYS_LLROUND = 0xA8D + SYS_LLROUNDD128 = 0xBE0 + SYS_LLROUNDD32 = 0xBDE + SYS_LLROUNDD64 = 0xBDF + SYS_LLROUNDF = 0xA90 + SYS_LLROUNDL = 0xA93 + SYS_LOCALTIM = 0x0B1 + SYS_LOCALTIME = 0x0B1 + SYS_LOCALTIME64 = 0xCDE + SYS_LOCALTIME64_R = 0xCDF + SYS_LOG10D128 = 0xBE6 + SYS_LOG10D32 = 0xBE4 + SYS_LOG10D64 = 0xBE5 + SYS_LOG1PD128 = 0xBE9 + SYS_LOG1PD32 = 0xBE7 + SYS_LOG1PD64 = 0xBE8 + SYS_LOG2D128 = 0xBEC + SYS_LOG2D32 = 0xBEA + SYS_LOG2D64 = 0xBEB + SYS_LOGBD128 = 0xBEF + SYS_LOGBD32 = 0xBED + SYS_LOGBD64 = 0xBEE + SYS_LOGBF = 0xA96 + SYS_LOGBL = 0xA99 + SYS_LOGD128 = 0xBE3 + SYS_LOGD32 = 0xBE1 + SYS_LOGD64 = 0xBE2 + SYS_LRINT = 0xA9C + SYS_LRINTD128 = 0xBF2 + SYS_LRINTD32 = 0xBF0 + SYS_LRINTD64 = 0xBF1 + SYS_LRINTF = 0xA9F + SYS_LRINTL = 0xAA2 + SYS_LROUNDD128 = 0xBF5 + SYS_LROUNDD32 = 0xBF3 + SYS_LROUNDD64 = 0xBF4 + SYS_LROUNDL = 0xAA5 + SYS_MBLEN = 0x0AF + SYS_MBRTOC16 = 0xD42 + SYS_MBRTOC32 = 0xD43 + SYS_MEMSET = 0x0A3 + SYS_MKTIME = 0x0AC + SYS_MKTIME64 = 0xCE0 + SYS_MODFD128 = 0xBF8 + SYS_MODFD32 = 0xBF6 + SYS_MODFD64 = 0xBF7 + SYS_NAN = 0xAA8 + SYS_NAND128 = 0xBFB + SYS_NAND32 = 0xBF9 + SYS_NAND64 = 0xBFA + SYS_NANF = 0xAAA + SYS_NANL = 0xAAC + SYS_NEARBYINT = 0xAAE + SYS_NEARBYINTD128 = 0xBFE + SYS_NEARBYINTD32 = 0xBFC + SYS_NEARBYINTD64 = 0xBFD + SYS_NEARBYINTF = 0xAB1 + SYS_NEARBYINTL = 0xAB4 + SYS_NEXTAFTERD128 = 0xC01 + SYS_NEXTAFTERD32 = 0xBFF 
+ SYS_NEXTAFTERD64 = 0xC00 + SYS_NEXTAFTERF = 0xAB7 + SYS_NEXTAFTERL = 0xABA + SYS_NEXTTOWARD = 0xABD + SYS_NEXTTOWARDD128 = 0xC04 + SYS_NEXTTOWARDD32 = 0xC02 + SYS_NEXTTOWARDD64 = 0xC03 + SYS_NEXTTOWARDF = 0xAC0 + SYS_NEXTTOWARDL = 0xAC3 + SYS_NL_LANGINFO = 0x0FC + SYS_PERROR_UNLOCKED = 0xCBD + SYS_POSIX_FALLOCATE = 0xCE8 + SYS_POSIX_MEMALIGN = 0xCE9 + SYS_POSIX_OPENPT = 0xC66 + SYS_POWD128 = 0xC07 + SYS_POWD32 = 0xC05 + SYS_POWD64 = 0xC06 + SYS_PRINTF_UNLOCKED = 0xCBE + SYS_PSELECT = 0xC67 + SYS_PTHREAD_ATTR_GETSTACK = 0xB3E + SYS_PTHREAD_ATTR_SETSTACK = 0xB3F + SYS_PTHREAD_SECURITY_APPLID_NP = 0xCE4 + SYS_PUTS_UNLOCKED = 0xC7F + SYS_PUTWCHAR_UNLOCKED = 0xCC0 + SYS_PUTWC_UNLOCKED = 0xCBF + SYS_QUANTEXPD128 = 0xD46 + SYS_QUANTEXPD32 = 0xD44 + SYS_QUANTEXPD64 = 0xD45 + SYS_QUANTIZED128 = 0xC0A + SYS_QUANTIZED32 = 0xC08 + SYS_QUANTIZED64 = 0xC09 + SYS_REMAINDERD128 = 0xC0D + SYS_REMAINDERD32 = 0xC0B + SYS_REMAINDERD64 = 0xC0C + SYS_RESIZE_ALLOC = 0xCEB + SYS_REWIND_UNLOCKED = 0xCC1 + SYS_RINTD128 = 0xC13 + SYS_RINTD32 = 0xC11 + SYS_RINTD64 = 0xC12 + SYS_RINTF = 0xACB + SYS_RINTL = 0xACD + SYS_ROUND = 0xACF + SYS_ROUNDD128 = 0xC16 + SYS_ROUNDD32 = 0xC14 + SYS_ROUNDD64 = 0xC15 + SYS_ROUNDF = 0xAD2 + SYS_ROUNDL = 0xAD5 + SYS_SAMEQUANTUMD128 = 0xC19 + SYS_SAMEQUANTUMD32 = 0xC17 + SYS_SAMEQUANTUMD64 = 0xC18 + SYS_SCALBLN = 0xAD8 + SYS_SCALBLND128 = 0xC1C + SYS_SCALBLND32 = 0xC1A + SYS_SCALBLND64 = 0xC1B + SYS_SCALBLNF = 0xADB + SYS_SCALBLNL = 0xADE + SYS_SCALBND128 = 0xC1F + SYS_SCALBND32 = 0xC1D + SYS_SCALBND64 = 0xC1E + SYS_SCALBNF = 0xAE3 + SYS_SCALBNL = 0xAE6 + SYS_SCANF_UNLOCKED = 0xCC2 + SYS_SCHED_YIELD = 0xB32 + SYS_SETENV = 0x0C8 + SYS_SETIPV4SOURCEFILTER = 0xC76 + SYS_SETSOURCEFILTER = 0xC78 + SYS_SHM_OPEN = 0xC8C + SYS_SHM_UNLINK = 0xC8D + SYS_SIND128 = 0xC22 + SYS_SIND32 = 0xC20 + SYS_SIND64 = 0xC21 + SYS_SINHD128 = 0xC25 + SYS_SINHD32 = 0xC23 + SYS_SINHD64 = 0xC24 + SYS_SIZEOF_ALLOC = 0xCEA + SYS_SOCKATMARK = 0xC68 + SYS_SQRTD128 = 0xC28 + SYS_SQRTD32 = 0xC26 + SYS_SQRTD64 = 0xC27 + SYS_STRCHR = 0x0A0 + SYS_STRCSPN = 0x0A1 + SYS_STRERROR = 0x0A8 + SYS_STRERROR_R = 0xB33 + SYS_STRFTIME = 0x0B2 + SYS_STRLEN = 0x0A9 + SYS_STRPBRK = 0x0A2 + SYS_STRSPN = 0x0A4 + SYS_STRSTR = 0x0A5 + SYS_STRTOD128 = 0xC2B + SYS_STRTOD32 = 0xC29 + SYS_STRTOD64 = 0xC2A + SYS_STRTOK = 0x0A6 + SYS_TAND128 = 0xC2E + SYS_TAND32 = 0xC2C + SYS_TAND64 = 0xC2D + SYS_TANHD128 = 0xC31 + SYS_TANHD32 = 0xC2F + SYS_TANHD64 = 0xC30 + SYS_TGAMMAD128 = 0xC34 + SYS_TGAMMAD32 = 0xC32 + SYS_TGAMMAD64 = 0xC33 + SYS_TIME = 0x0AD + SYS_TIME64 = 0xCE1 + SYS_TMPFILE64 = 0xD4D + SYS_TMPFILE64_UNLOCKED = 0xD4E + SYS_TMPFILE_UNLOCKED = 0xCFD + SYS_TRUNCD128 = 0xC40 + SYS_TRUNCD32 = 0xC3E + SYS_TRUNCD64 = 0xC3F + SYS_UNGETC_UNLOCKED = 0xCC3 + SYS_UNGETWC_UNLOCKED = 0xCC4 + SYS_UNSETENV = 0xB34 + SYS_VFPRINTF_UNLOCKED = 0xCC5 + SYS_VFSCANF_UNLOCKED = 0xCC7 + SYS_VFWPRINTF_UNLOCKED = 0xCC9 + SYS_VFWSCANF_UNLOCKED = 0xCCB + SYS_VPRINTF_UNLOCKED = 0xCCD + SYS_VSCANF_UNLOCKED = 0xCCF + SYS_VWPRINTF_UNLOCKED = 0xCD1 + SYS_VWSCANF_UNLOCKED = 0xCD3 + SYS_WCSTOD128 = 0xC43 + SYS_WCSTOD32 = 0xC41 + SYS_WCSTOD64 = 0xC42 + SYS_WPRINTF_UNLOCKED = 0xCD5 + SYS_WSCANF_UNLOCKED = 0xCD6 + SYS__FLUSHLBF = 0xD68 + SYS__FLUSHLBF_UNLOCKED = 0xD6F + SYS___ACOSHF_H = 0xA54 + SYS___ACOSHL_H = 0xA55 + SYS___ASINHF_H = 0xA56 + SYS___ASINHL_H = 0xA57 + SYS___ATANPID128 = 0xC6D + SYS___ATANPID32 = 0xC6B + SYS___ATANPID64 = 0xC6C + SYS___CBRTF_H = 0xA58 + SYS___CBRTL_H = 0xA59 + SYS___CDUMP = 0x0C4 + SYS___CLASS = 0xAFA + SYS___CLASS2 = 0xB99 + SYS___CLASS2D128 = 
0xC99 + SYS___CLASS2D32 = 0xC97 + SYS___CLASS2D64 = 0xC98 + SYS___CLASS2F = 0xC91 + SYS___CLASS2F_B = 0xC93 + SYS___CLASS2F_H = 0xC94 + SYS___CLASS2L = 0xC92 + SYS___CLASS2L_B = 0xC95 + SYS___CLASS2L_H = 0xC96 + SYS___CLASS2_B = 0xB9A + SYS___CLASS2_H = 0xB9B + SYS___CLASS_B = 0xAFB + SYS___CLASS_H = 0xAFC + SYS___CLOGL_B = 0xA01 + SYS___CLOGL_H = 0xA02 + SYS___CLRENV = 0x0C9 + SYS___CLRMF = 0x0BD + SYS___CODEPAGE_INFO = 0xC64 + SYS___CONJF_B = 0xA07 + SYS___CONJF_H = 0xA08 + SYS___CONJL_B = 0xA0A + SYS___CONJL_H = 0xA0B + SYS___CONJ_B = 0xA04 + SYS___CONJ_H = 0xA05 + SYS___COPYSIGN_B = 0xA5A + SYS___COPYSIGN_H = 0xAF5 + SYS___COSPID128 = 0xC70 + SYS___COSPID32 = 0xC6E + SYS___COSPID64 = 0xC6F + SYS___CPOWF_B = 0xA10 + SYS___CPOWF_H = 0xA11 + SYS___CPOWL_B = 0xA13 + SYS___CPOWL_H = 0xA14 + SYS___CPOW_B = 0xA0D + SYS___CPOW_H = 0xA0E + SYS___CPROJF_B = 0xA19 + SYS___CPROJF_H = 0xA1A + SYS___CPROJL_B = 0xA1C + SYS___CPROJL_H = 0xA1D + SYS___CPROJ_B = 0xA16 + SYS___CPROJ_H = 0xA17 + SYS___CREALF_B = 0xA22 + SYS___CREALF_H = 0xA23 + SYS___CREALL_B = 0xA25 + SYS___CREALL_H = 0xA26 + SYS___CREAL_B = 0xA1F + SYS___CREAL_H = 0xA20 + SYS___CSINF_B = 0xA2B + SYS___CSINF_H = 0xA2C + SYS___CSINHF_B = 0xA34 + SYS___CSINHF_H = 0xA35 + SYS___CSINHL_B = 0xA37 + SYS___CSINHL_H = 0xA38 + SYS___CSINH_B = 0xA31 + SYS___CSINH_H = 0xA32 + SYS___CSINL_B = 0xA2E + SYS___CSINL_H = 0xA2F + SYS___CSIN_B = 0xA28 + SYS___CSIN_H = 0xA29 + SYS___CSNAP = 0x0C5 + SYS___CSQRTF_B = 0xA3D + SYS___CSQRTF_H = 0xA3E + SYS___CSQRTL_B = 0xA40 + SYS___CSQRTL_H = 0xA41 + SYS___CSQRT_B = 0xA3A + SYS___CSQRT_H = 0xA3B + SYS___CTANF_B = 0xA46 + SYS___CTANF_H = 0xA47 + SYS___CTANHF_B = 0xA4F + SYS___CTANHF_H = 0xA50 + SYS___CTANHL_B = 0xA52 + SYS___CTANHL_H = 0xA53 + SYS___CTANH_B = 0xA4C + SYS___CTANH_H = 0xA4D + SYS___CTANL_B = 0xA49 + SYS___CTANL_H = 0xA4A + SYS___CTAN_B = 0xA43 + SYS___CTAN_H = 0xA44 + SYS___CTEST = 0x0C7 + SYS___CTRACE = 0x0C6 + SYS___D1TOP = 0xC9B + SYS___D2TOP = 0xC9C + SYS___D4TOP = 0xC9D + SYS___DYNALL = 0x0C3 + SYS___DYNFRE = 0x0C2 + SYS___EXP2F_H = 0xA5E + SYS___EXP2L_H = 0xA5F + SYS___EXP2_H = 0xA5D + SYS___EXPM1F_H = 0xA5B + SYS___EXPM1L_H = 0xA5C + SYS___FBUFSIZE = 0xD60 + SYS___FLBF = 0xD62 + SYS___FLDATA = 0x0C1 + SYS___FMAF_B = 0xA67 + SYS___FMAF_H = 0xA68 + SYS___FMAL_B = 0xA6A + SYS___FMAL_H = 0xA6B + SYS___FMAXF_B = 0xA70 + SYS___FMAXF_H = 0xA71 + SYS___FMAXL_B = 0xA73 + SYS___FMAXL_H = 0xA74 + SYS___FMAX_B = 0xA6D + SYS___FMAX_H = 0xA6E + SYS___FMA_B = 0xA64 + SYS___FMA_H = 0xA65 + SYS___FMINF_B = 0xA79 + SYS___FMINF_H = 0xA7A + SYS___FMINL_B = 0xA7C + SYS___FMINL_H = 0xA7D + SYS___FMIN_B = 0xA76 + SYS___FMIN_H = 0xA77 + SYS___FPENDING = 0xD61 + SYS___FPENDING_UNLOCKED = 0xD6C + SYS___FPURGE = 0xD69 + SYS___FPURGE_UNLOCKED = 0xD70 + SYS___FP_CAST_D = 0xBCB + SYS___FREADABLE = 0xD63 + SYS___FREADAHEAD = 0xD6A + SYS___FREADAHEAD_UNLOCKED = 0xD71 + SYS___FREADING = 0xD65 + SYS___FREADING_UNLOCKED = 0xD6D + SYS___FSEEK2 = 0xB3C + SYS___FSETERR = 0xD6B + SYS___FSETLOCKING = 0xD67 + SYS___FTCHEP = 0x0BF + SYS___FTELL2 = 0xB3B + SYS___FUPDT = 0x0B5 + SYS___FWRITABLE = 0xD64 + SYS___FWRITING = 0xD66 + SYS___FWRITING_UNLOCKED = 0xD6E + SYS___GETCB = 0x0B4 + SYS___GETGRGID1 = 0xD5B + SYS___GETGRNAM1 = 0xD5C + SYS___GETTHENT = 0xCE5 + SYS___GETTOD = 0xD3E + SYS___HYPOTF_H = 0xAF6 + SYS___HYPOTL_H = 0xAF7 + SYS___ILOGBF_B = 0xA7F + SYS___ILOGBF_H = 0xA80 + SYS___ILOGBL_B = 0xA82 + SYS___ILOGBL_H = 0xA83 + SYS___ISBLANK_A = 0xB2E + SYS___ISBLNK = 0x0FE + SYS___ISWBLANK_A = 0xB2F + SYS___LE_CEEGTJS = 0xD72 + 
SYS___LE_TRACEBACK = 0xB7A + SYS___LGAMMAL_H = 0xA62 + SYS___LGAMMA_B_C99 = 0xB39 + SYS___LGAMMA_H_C99 = 0xB38 + SYS___LGAMMA_R_C99 = 0xB3A + SYS___LLRINTF_B = 0xA88 + SYS___LLRINTF_H = 0xA89 + SYS___LLRINTL_B = 0xA8B + SYS___LLRINTL_H = 0xA8C + SYS___LLRINT_B = 0xA85 + SYS___LLRINT_H = 0xA86 + SYS___LLROUNDF_B = 0xA91 + SYS___LLROUNDF_H = 0xA92 + SYS___LLROUNDL_B = 0xA94 + SYS___LLROUNDL_H = 0xA95 + SYS___LLROUND_B = 0xA8E + SYS___LLROUND_H = 0xA8F + SYS___LOCALE_CTL = 0xD47 + SYS___LOG1PF_H = 0xA60 + SYS___LOG1PL_H = 0xA61 + SYS___LOGBF_B = 0xA97 + SYS___LOGBF_H = 0xA98 + SYS___LOGBL_B = 0xA9A + SYS___LOGBL_H = 0xA9B + SYS___LOGIN_APPLID = 0xCE2 + SYS___LRINTF_B = 0xAA0 + SYS___LRINTF_H = 0xAA1 + SYS___LRINTL_B = 0xAA3 + SYS___LRINTL_H = 0xAA4 + SYS___LRINT_B = 0xA9D + SYS___LRINT_H = 0xA9E + SYS___LROUNDF_FIXUP = 0xB31 + SYS___LROUNDL_B = 0xAA6 + SYS___LROUNDL_H = 0xAA7 + SYS___LROUND_FIXUP = 0xB30 + SYS___MOSERVICES = 0xD3D + SYS___MUST_STAY_CLEAN = 0xB7C + SYS___NANF_B = 0xAAB + SYS___NANL_B = 0xAAD + SYS___NAN_B = 0xAA9 + SYS___NEARBYINTF_B = 0xAB2 + SYS___NEARBYINTF_H = 0xAB3 + SYS___NEARBYINTL_B = 0xAB5 + SYS___NEARBYINTL_H = 0xAB6 + SYS___NEARBYINT_B = 0xAAF + SYS___NEARBYINT_H = 0xAB0 + SYS___NEXTAFTERF_B = 0xAB8 + SYS___NEXTAFTERF_H = 0xAB9 + SYS___NEXTAFTERL_B = 0xABB + SYS___NEXTAFTERL_H = 0xABC + SYS___NEXTTOWARDF_B = 0xAC1 + SYS___NEXTTOWARDF_H = 0xAC2 + SYS___NEXTTOWARDL_B = 0xAC4 + SYS___NEXTTOWARDL_H = 0xAC5 + SYS___NEXTTOWARD_B = 0xABE + SYS___NEXTTOWARD_H = 0xABF + SYS___O_ENV = 0xB7D + SYS___PASSWD_APPLID = 0xCE3 + SYS___PTOD1 = 0xC9E + SYS___PTOD2 = 0xC9F + SYS___PTOD4 = 0xCA0 + SYS___REGCOMP_STD = 0x0EA + SYS___REMAINDERF_H = 0xAC6 + SYS___REMAINDERL_H = 0xAC7 + SYS___REMQUOD128 = 0xC10 + SYS___REMQUOD32 = 0xC0E + SYS___REMQUOD64 = 0xC0F + SYS___REMQUOF_H = 0xAC9 + SYS___REMQUOL_H = 0xACA + SYS___REMQUO_H = 0xAC8 + SYS___RINTF_B = 0xACC + SYS___RINTL_B = 0xACE + SYS___ROUNDF_B = 0xAD3 + SYS___ROUNDF_H = 0xAD4 + SYS___ROUNDL_B = 0xAD6 + SYS___ROUNDL_H = 0xAD7 + SYS___ROUND_B = 0xAD0 + SYS___ROUND_H = 0xAD1 + SYS___SCALBLNF_B = 0xADC + SYS___SCALBLNF_H = 0xADD + SYS___SCALBLNL_B = 0xADF + SYS___SCALBLNL_H = 0xAE0 + SYS___SCALBLN_B = 0xAD9 + SYS___SCALBLN_H = 0xADA + SYS___SCALBNF_B = 0xAE4 + SYS___SCALBNF_H = 0xAE5 + SYS___SCALBNL_B = 0xAE7 + SYS___SCALBNL_H = 0xAE8 + SYS___SCALBN_B = 0xAE1 + SYS___SCALBN_H = 0xAE2 + SYS___SETENV = 0x0C8 + SYS___SINPID128 = 0xC73 + SYS___SINPID32 = 0xC71 + SYS___SINPID64 = 0xC72 + SYS___SMF_RECORD2 = 0xD48 + SYS___STATIC_REINIT = 0xB3D + SYS___TGAMMAF_H_C99 = 0xB79 + SYS___TGAMMAL_H = 0xAE9 + SYS___TGAMMA_H_C99 = 0xB78 + SYS___TOCSNAME2 = 0xC9A + SYS_CEIL = 0x01F + SYS_CHAUDIT = 0x1E0 + SYS_EXP = 0x01A + SYS_FCHAUDIT = 0x1E1 + SYS_FREXP = 0x01D + SYS_GETGROUPSBYNAME = 0x1E2 + SYS_GETPWUID = 0x1A0 + SYS_GETUID = 0x1A1 + SYS_ISATTY = 0x1A3 + SYS_KILL = 0x1A4 + SYS_LDEXP = 0x01E + SYS_LINK = 0x1A5 + SYS_LOG10 = 0x01C + SYS_LSEEK = 0x1A6 + SYS_LSTAT = 0x1A7 + SYS_MKDIR = 0x1A8 + SYS_MKFIFO = 0x1A9 + SYS_MKNOD = 0x1AA + SYS_MODF = 0x01B + SYS_MOUNT = 0x1AB + SYS_OPEN = 0x1AC + SYS_OPENDIR = 0x1AD + SYS_PATHCONF = 0x1AE + SYS_PAUSE = 0x1AF + SYS_PIPE = 0x1B0 + SYS_PTHREAD_ATTR_DESTROY = 0x1E7 + SYS_PTHREAD_ATTR_GETDETACHSTATE = 0x1EB + SYS_PTHREAD_ATTR_GETSTACKSIZE = 0x1E9 + SYS_PTHREAD_ATTR_GETWEIGHT_NP = 0x1ED + SYS_PTHREAD_ATTR_INIT = 0x1E6 + SYS_PTHREAD_ATTR_SETDETACHSTATE = 0x1EA + SYS_PTHREAD_ATTR_SETSTACKSIZE = 0x1E8 + SYS_PTHREAD_ATTR_SETWEIGHT_NP = 0x1EC + SYS_PTHREAD_CANCEL = 0x1EE + SYS_PTHREAD_CLEANUP_POP = 0x1F0 + 
SYS_PTHREAD_CLEANUP_PUSH = 0x1EF + SYS_PTHREAD_CONDATTR_DESTROY = 0x1F2 + SYS_PTHREAD_CONDATTR_INIT = 0x1F1 + SYS_PTHREAD_COND_BROADCAST = 0x1F6 + SYS_PTHREAD_COND_DESTROY = 0x1F4 + SYS_PTHREAD_COND_INIT = 0x1F3 + SYS_PTHREAD_COND_SIGNAL = 0x1F5 + SYS_PTHREAD_COND_TIMEDWAIT = 0x1F8 + SYS_PTHREAD_COND_WAIT = 0x1F7 + SYS_PTHREAD_CREATE = 0x1F9 + SYS_PTHREAD_DETACH = 0x1FA + SYS_PTHREAD_EQUAL = 0x1FB + SYS_PTHREAD_EXIT = 0x1E4 + SYS_PTHREAD_GETSPECIFIC = 0x1FC + SYS_PTHREAD_JOIN = 0x1FD + SYS_PTHREAD_KEY_CREATE = 0x1FE + SYS_PTHREAD_KILL = 0x1E5 + SYS_PTHREAD_MUTEXATTR_INIT = 0x1FF + SYS_READ = 0x1B2 + SYS_READDIR = 0x1B3 + SYS_READLINK = 0x1B4 + SYS_REWINDDIR = 0x1B5 + SYS_RMDIR = 0x1B6 + SYS_SETEGID = 0x1B7 + SYS_SETEUID = 0x1B8 + SYS_SETGID = 0x1B9 + SYS_SETPGID = 0x1BA + SYS_SETSID = 0x1BB + SYS_SETUID = 0x1BC + SYS_SIGACTION = 0x1BD + SYS_SIGADDSET = 0x1BE + SYS_SIGDELSET = 0x1BF + SYS_SIGEMPTYSET = 0x1C0 + SYS_SIGFILLSET = 0x1C1 + SYS_SIGISMEMBER = 0x1C2 + SYS_SIGLONGJMP = 0x1C3 + SYS_SIGPENDING = 0x1C4 + SYS_SIGPROCMASK = 0x1C5 + SYS_SIGSETJMP = 0x1C6 + SYS_SIGSUSPEND = 0x1C7 + SYS_SIGWAIT = 0x1E3 + SYS_SLEEP = 0x1C8 + SYS_STAT = 0x1C9 + SYS_SYMLINK = 0x1CB + SYS_SYSCONF = 0x1CC + SYS_TCDRAIN = 0x1CD + SYS_TCFLOW = 0x1CE + SYS_TCFLUSH = 0x1CF + SYS_TCGETATTR = 0x1D0 + SYS_TCGETPGRP = 0x1D1 + SYS_TCSENDBREAK = 0x1D2 + SYS_TCSETATTR = 0x1D3 + SYS_TCSETPGRP = 0x1D4 + SYS_TIMES = 0x1D5 + SYS_TTYNAME = 0x1D6 + SYS_TZSET = 0x1D7 + SYS_UMASK = 0x1D8 + SYS_UMOUNT = 0x1D9 + SYS_UNAME = 0x1DA + SYS_UNLINK = 0x1DB + SYS_UTIME = 0x1DC + SYS_WAIT = 0x1DD + SYS_WAITPID = 0x1DE + SYS_WRITE = 0x1DF + SYS_W_GETPSENT = 0x1B1 + SYS_W_IOCTL = 0x1A2 + SYS_W_STATFS = 0x1CA + SYS_A64L = 0x2EF + SYS_BCMP = 0x2B9 + SYS_BCOPY = 0x2BA + SYS_BZERO = 0x2BB + SYS_CATCLOSE = 0x2B6 + SYS_CATGETS = 0x2B7 + SYS_CATOPEN = 0x2B8 + SYS_CRYPT = 0x2AC + SYS_DBM_CLEARERR = 0x2F7 + SYS_DBM_CLOSE = 0x2F8 + SYS_DBM_DELETE = 0x2F9 + SYS_DBM_ERROR = 0x2FA + SYS_DBM_FETCH = 0x2FB + SYS_DBM_FIRSTKEY = 0x2FC + SYS_DBM_NEXTKEY = 0x2FD + SYS_DBM_OPEN = 0x2FE + SYS_DBM_STORE = 0x2FF + SYS_DRAND48 = 0x2B2 + SYS_ENCRYPT = 0x2AD + SYS_ENDUTXENT = 0x2E1 + SYS_ERAND48 = 0x2B3 + SYS_ERF = 0x02C + SYS_ERFC = 0x02D + SYS_FCHDIR = 0x2D9 + SYS_FFS = 0x2BC + SYS_FMTMSG = 0x2E5 + SYS_FSTATVFS = 0x2B4 + SYS_FTIME = 0x2F5 + SYS_GAMMA = 0x02E + SYS_GETDATE = 0x2A6 + SYS_GETPAGESIZE = 0x2D8 + SYS_GETTIMEOFDAY = 0x2F6 + SYS_GETUTXENT = 0x2E0 + SYS_GETUTXID = 0x2E2 + SYS_GETUTXLINE = 0x2E3 + SYS_HCREATE = 0x2C6 + SYS_HDESTROY = 0x2C7 + SYS_HSEARCH = 0x2C8 + SYS_HYPOT = 0x02B + SYS_INDEX = 0x2BD + SYS_INITSTATE = 0x2C2 + SYS_INSQUE = 0x2CF + SYS_ISASCII = 0x2ED + SYS_JRAND48 = 0x2E6 + SYS_L64A = 0x2F0 + SYS_LCONG48 = 0x2EA + SYS_LFIND = 0x2C9 + SYS_LRAND48 = 0x2E7 + SYS_LSEARCH = 0x2CA + SYS_MEMCCPY = 0x2D4 + SYS_MRAND48 = 0x2E8 + SYS_NRAND48 = 0x2E9 + SYS_PCLOSE = 0x2D2 + SYS_POPEN = 0x2D1 + SYS_PUTUTXLINE = 0x2E4 + SYS_RANDOM = 0x2C4 + SYS_REMQUE = 0x2D0 + SYS_RINDEX = 0x2BE + SYS_SEED48 = 0x2EC + SYS_SETKEY = 0x2AE + SYS_SETSTATE = 0x2C3 + SYS_SETUTXENT = 0x2DF + SYS_SRAND48 = 0x2EB + SYS_SRANDOM = 0x2C5 + SYS_STATVFS = 0x2B5 + SYS_STRCASECMP = 0x2BF + SYS_STRDUP = 0x2C0 + SYS_STRNCASECMP = 0x2C1 + SYS_SWAB = 0x2D3 + SYS_TDELETE = 0x2CB + SYS_TFIND = 0x2CC + SYS_TOASCII = 0x2EE + SYS_TSEARCH = 0x2CD + SYS_TWALK = 0x2CE + SYS_UALARM = 0x2F1 + SYS_USLEEP = 0x2F2 + SYS_WAIT3 = 0x2A7 + SYS_WAITID = 0x2A8 + SYS_Y1 = 0x02A + SYS___ATOE = 0x2DB + SYS___ATOE_L = 0x2DC + SYS___CATTRM = 0x2A9 + SYS___CNVBLK = 0x2AF + SYS___CRYTRM = 0x2B0 + SYS___DLGHT = 0x2A1 + 
SYS___ECRTRM = 0x2B1 + SYS___ETOA = 0x2DD + SYS___ETOA_L = 0x2DE + SYS___GDTRM = 0x2AA + SYS___OCLCK = 0x2DA + SYS___OPARGF = 0x2A2 + SYS___OPERRF = 0x2A5 + SYS___OPINDF = 0x2A4 + SYS___OPOPTF = 0x2A3 + SYS___RNDTRM = 0x2AB + SYS___SRCTRM = 0x2F4 + SYS___TZONE = 0x2A0 + SYS___UTXTRM = 0x2F3 + SYS_ASIN = 0x03E + SYS_ISXDIGIT = 0x03B + SYS_SETLOCAL = 0x03A + SYS_SETLOCALE = 0x03A + SYS_SIN = 0x03F + SYS_TOLOWER = 0x03C + SYS_TOUPPER = 0x03D + SYS_ACCEPT_AND_RECV = 0x4F7 + SYS_ATOL = 0x04E + SYS_CHECKSCH = 0x4BC + SYS_CHECKSCHENV = 0x4BC + SYS_CLEARERR = 0x04C + SYS_CONNECTS = 0x4B5 + SYS_CONNECTSERVER = 0x4B5 + SYS_CONNECTW = 0x4B4 + SYS_CONNECTWORKMGR = 0x4B4 + SYS_CONTINUE = 0x4B3 + SYS_CONTINUEWORKUNIT = 0x4B3 + SYS_COPYSIGN = 0x4C2 + SYS_CREATEWO = 0x4B2 + SYS_CREATEWORKUNIT = 0x4B2 + SYS_DELETEWO = 0x4B9 + SYS_DELETEWORKUNIT = 0x4B9 + SYS_DISCONNE = 0x4B6 + SYS_DISCONNECTSERVER = 0x4B6 + SYS_FEOF = 0x04D + SYS_FERROR = 0x04A + SYS_FINITE = 0x4C8 + SYS_GAMMA_R = 0x4E2 + SYS_JOINWORK = 0x4B7 + SYS_JOINWORKUNIT = 0x4B7 + SYS_LEAVEWOR = 0x4B8 + SYS_LEAVEWORKUNIT = 0x4B8 + SYS_LGAMMA_R = 0x4EB + SYS_MATHERR = 0x4D0 + SYS_PERROR = 0x04F + SYS_QUERYMET = 0x4BA + SYS_QUERYMETRICS = 0x4BA + SYS_QUERYSCH = 0x4BB + SYS_QUERYSCHENV = 0x4BB + SYS_REWIND = 0x04B + SYS_SCALBN = 0x4D4 + SYS_SIGNIFIC = 0x4D5 + SYS_SIGNIFICAND = 0x4D5 + SYS___ACOSH_B = 0x4DA + SYS___ACOS_B = 0x4D9 + SYS___ASINH_B = 0x4BE + SYS___ASIN_B = 0x4DB + SYS___ATAN2_B = 0x4DC + SYS___ATANH_B = 0x4DD + SYS___ATAN_B = 0x4BF + SYS___CBRT_B = 0x4C0 + SYS___CEIL_B = 0x4C1 + SYS___COSH_B = 0x4DE + SYS___COS_B = 0x4C3 + SYS___DGHT = 0x4A8 + SYS___ENVN = 0x4B0 + SYS___ERFC_B = 0x4C5 + SYS___ERF_B = 0x4C4 + SYS___EXPM1_B = 0x4C6 + SYS___EXP_B = 0x4DF + SYS___FABS_B = 0x4C7 + SYS___FLOOR_B = 0x4C9 + SYS___FMOD_B = 0x4E0 + SYS___FP_SETMODE = 0x4F8 + SYS___FREXP_B = 0x4CA + SYS___GAMMA_B = 0x4E1 + SYS___GDRR = 0x4A1 + SYS___HRRNO = 0x4A2 + SYS___HYPOT_B = 0x4E3 + SYS___ILOGB_B = 0x4CB + SYS___ISNAN_B = 0x4CC + SYS___J0_B = 0x4E4 + SYS___J1_B = 0x4E6 + SYS___JN_B = 0x4E8 + SYS___LDEXP_B = 0x4CD + SYS___LGAMMA_B = 0x4EA + SYS___LOG10_B = 0x4ED + SYS___LOG1P_B = 0x4CE + SYS___LOGB_B = 0x4CF + SYS___LOGIN = 0x4F5 + SYS___LOG_B = 0x4EC + SYS___MLOCKALL = 0x4B1 + SYS___MODF_B = 0x4D1 + SYS___NEXTAFTER_B = 0x4D2 + SYS___OPENDIR2 = 0x4F3 + SYS___OPEN_STAT = 0x4F6 + SYS___OPND = 0x4A5 + SYS___OPPT = 0x4A6 + SYS___OPRG = 0x4A3 + SYS___OPRR = 0x4A4 + SYS___PID_AFFINITY = 0x4BD + SYS___POW_B = 0x4EE + SYS___READDIR2 = 0x4F4 + SYS___REMAINDER_B = 0x4EF + SYS___RINT_B = 0x4D3 + SYS___SCALB_B = 0x4F0 + SYS___SIGACTIONSET = 0x4FB + SYS___SIGGM = 0x4A7 + SYS___SINH_B = 0x4F1 + SYS___SIN_B = 0x4D6 + SYS___SQRT_B = 0x4F2 + SYS___TANH_B = 0x4D8 + SYS___TAN_B = 0x4D7 + SYS___TRRNO = 0x4AF + SYS___TZNE = 0x4A9 + SYS___TZZN = 0x4AA + SYS___UCREATE = 0x4FC + SYS___UFREE = 0x4FE + SYS___UHEAPREPORT = 0x4FF + SYS___UMALLOC = 0x4FD + SYS___Y0_B = 0x4E5 + SYS___Y1_B = 0x4E7 + SYS___YN_B = 0x4E9 + SYS_ABORT = 0x05C + SYS_ASCTIME_R = 0x5E0 + SYS_ATEXIT = 0x05D + SYS_CONNECTE = 0x5AE + SYS_CONNECTEXPORTIMPORT = 0x5AE + SYS_CTIME_R = 0x5E1 + SYS_DN_COMP = 0x5DF + SYS_DN_EXPAND = 0x5DD + SYS_DN_SKIPNAME = 0x5DE + SYS_EXIT = 0x05A + SYS_EXPORTWO = 0x5A1 + SYS_EXPORTWORKUNIT = 0x5A1 + SYS_EXTRACTW = 0x5A5 + SYS_EXTRACTWORKUNIT = 0x5A5 + SYS_FSEEKO = 0x5C9 + SYS_FTELLO = 0x5C8 + SYS_GETGRGID_R = 0x5E7 + SYS_GETGRNAM_R = 0x5E8 + SYS_GETLOGIN_R = 0x5E9 + SYS_GETPWNAM_R = 0x5EA + SYS_GETPWUID_R = 0x5EB + SYS_GMTIME_R = 0x5E2 + SYS_IMPORTWO = 0x5A3 + SYS_IMPORTWORKUNIT = 0x5A3 + 
SYS_INET_NTOP = 0x5D3 + SYS_INET_PTON = 0x5D4 + SYS_LLABS = 0x5CE + SYS_LLDIV = 0x5CB + SYS_LOCALTIME_R = 0x5E3 + SYS_PTHREAD_ATFORK = 0x5ED + SYS_PTHREAD_ATTR_GETDETACHSTATE_U98 = 0x5FB + SYS_PTHREAD_ATTR_GETGUARDSIZE = 0x5EE + SYS_PTHREAD_ATTR_GETSCHEDPARAM = 0x5F9 + SYS_PTHREAD_ATTR_GETSTACKADDR = 0x5EF + SYS_PTHREAD_ATTR_SETDETACHSTATE_U98 = 0x5FC + SYS_PTHREAD_ATTR_SETGUARDSIZE = 0x5F0 + SYS_PTHREAD_ATTR_SETSCHEDPARAM = 0x5FA + SYS_PTHREAD_ATTR_SETSTACKADDR = 0x5F1 + SYS_PTHREAD_CONDATTR_GETPSHARED = 0x5F2 + SYS_PTHREAD_CONDATTR_SETPSHARED = 0x5F3 + SYS_PTHREAD_DETACH_U98 = 0x5FD + SYS_PTHREAD_GETCONCURRENCY = 0x5F4 + SYS_PTHREAD_GETSPECIFIC_U98 = 0x5FE + SYS_PTHREAD_KEY_DELETE = 0x5F5 + SYS_PTHREAD_SETCANCELSTATE = 0x5FF + SYS_PTHREAD_SETCONCURRENCY = 0x5F6 + SYS_PTHREAD_SIGMASK = 0x5F7 + SYS_QUERYENC = 0x5AD + SYS_QUERYWORKUNITCLASSIFICATION = 0x5AD + SYS_RAISE = 0x05E + SYS_RAND_R = 0x5E4 + SYS_READDIR_R = 0x5E6 + SYS_REALLOC = 0x05B + SYS_RES_INIT = 0x5D8 + SYS_RES_MKQUERY = 0x5D7 + SYS_RES_QUERY = 0x5D9 + SYS_RES_QUERYDOMAIN = 0x5DC + SYS_RES_SEARCH = 0x5DA + SYS_RES_SEND = 0x5DB + SYS_SETJMP = 0x05F + SYS_SIGQUEUE = 0x5A9 + SYS_STRTOK_R = 0x5E5 + SYS_STRTOLL = 0x5B0 + SYS_STRTOULL = 0x5B1 + SYS_TTYNAME_R = 0x5EC + SYS_UNDOEXPO = 0x5A2 + SYS_UNDOEXPORTWORKUNIT = 0x5A2 + SYS_UNDOIMPO = 0x5A4 + SYS_UNDOIMPORTWORKUNIT = 0x5A4 + SYS_WCSTOLL = 0x5CC + SYS_WCSTOULL = 0x5CD + SYS___ABORT = 0x05C + SYS___CONSOLE2 = 0x5D2 + SYS___CPL = 0x5A6 + SYS___DISCARDDATA = 0x5F8 + SYS___DSA_PREV = 0x5B2 + SYS___EP_FIND = 0x5B3 + SYS___FP_SWAPMODE = 0x5AF + SYS___GETUSERID = 0x5AB + SYS___GET_CPUID = 0x5B9 + SYS___GET_SYSTEM_SETTINGS = 0x5BA + SYS___IPDOMAINNAME = 0x5AC + SYS___MAP_INIT = 0x5A7 + SYS___MAP_SERVICE = 0x5A8 + SYS___MOUNT = 0x5AA + SYS___MSGRCV_TIMED = 0x5B7 + SYS___RES = 0x5D6 + SYS___SEMOP_TIMED = 0x5B8 + SYS___SERVER_THREADS_QUERY = 0x5B4 + SYS_FPRINTF = 0x06D + SYS_FSCANF = 0x06A + SYS_PRINTF = 0x06F + SYS_SETBUF = 0x06B + SYS_SETVBUF = 0x06C + SYS_SSCANF = 0x06E + SYS___CATGETS_A = 0x6C0 + SYS___CHAUDIT_A = 0x6F4 + SYS___CHMOD_A = 0x6E8 + SYS___COLLATE_INIT_A = 0x6AC + SYS___CREAT_A = 0x6F6 + SYS___CTYPE_INIT_A = 0x6AF + SYS___DLLLOAD_A = 0x6DF + SYS___DLLQUERYFN_A = 0x6E0 + SYS___DLLQUERYVAR_A = 0x6E1 + SYS___E2A_L = 0x6E3 + SYS___EXECLE_A = 0x6A0 + SYS___EXECLP_A = 0x6A4 + SYS___EXECVE_A = 0x6C1 + SYS___EXECVP_A = 0x6C2 + SYS___EXECV_A = 0x6B1 + SYS___FPRINTF_A = 0x6FA + SYS___GETADDRINFO_A = 0x6BF + SYS___GETNAMEINFO_A = 0x6C4 + SYS___GET_WCTYPE_STD_A = 0x6AE + SYS___ICONV_OPEN_A = 0x6DE + SYS___IF_INDEXTONAME_A = 0x6DC + SYS___IF_NAMETOINDEX_A = 0x6DB + SYS___ISWCTYPE_A = 0x6B0 + SYS___IS_WCTYPE_STD_A = 0x6B2 + SYS___LOCALECONV_A = 0x6B8 + SYS___LOCALECONV_STD_A = 0x6B9 + SYS___LOCALE_INIT_A = 0x6B7 + SYS___LSTAT_A = 0x6EE + SYS___LSTAT_O_A = 0x6EF + SYS___MKDIR_A = 0x6E9 + SYS___MKFIFO_A = 0x6EC + SYS___MKNOD_A = 0x6F0 + SYS___MONETARY_INIT_A = 0x6BC + SYS___MOUNT_A = 0x6F1 + SYS___NL_CSINFO_A = 0x6D6 + SYS___NL_LANGINFO_A = 0x6BA + SYS___NL_LNAGINFO_STD_A = 0x6BB + SYS___NL_MONINFO_A = 0x6D7 + SYS___NL_NUMINFO_A = 0x6D8 + SYS___NL_RESPINFO_A = 0x6D9 + SYS___NL_TIMINFO_A = 0x6DA + SYS___NUMERIC_INIT_A = 0x6C6 + SYS___OPEN_A = 0x6F7 + SYS___PRINTF_A = 0x6DD + SYS___RESP_INIT_A = 0x6C7 + SYS___RPMATCH_A = 0x6C8 + SYS___RPMATCH_C_A = 0x6C9 + SYS___RPMATCH_STD_A = 0x6CA + SYS___SETLOCALE_A = 0x6F9 + SYS___SPAWNP_A = 0x6C5 + SYS___SPAWN_A = 0x6C3 + SYS___SPRINTF_A = 0x6FB + SYS___STAT_A = 0x6EA + SYS___STAT_O_A = 0x6EB + SYS___STRCOLL_STD_A = 0x6A1 + SYS___STRFMON_A = 0x6BD + 
SYS___STRFMON_STD_A = 0x6BE + SYS___STRFTIME_A = 0x6CC + SYS___STRFTIME_STD_A = 0x6CD + SYS___STRPTIME_A = 0x6CE + SYS___STRPTIME_STD_A = 0x6CF + SYS___STRXFRM_A = 0x6A2 + SYS___STRXFRM_C_A = 0x6A3 + SYS___STRXFRM_STD_A = 0x6A5 + SYS___SYNTAX_INIT_A = 0x6D4 + SYS___TIME_INIT_A = 0x6CB + SYS___TOD_INIT_A = 0x6D5 + SYS___TOWLOWER_A = 0x6B3 + SYS___TOWLOWER_STD_A = 0x6B4 + SYS___TOWUPPER_A = 0x6B5 + SYS___TOWUPPER_STD_A = 0x6B6 + SYS___UMOUNT_A = 0x6F2 + SYS___VFPRINTF_A = 0x6FC + SYS___VPRINTF_A = 0x6FD + SYS___VSPRINTF_A = 0x6FE + SYS___VSWPRINTF_A = 0x6FF + SYS___WCSCOLL_A = 0x6A6 + SYS___WCSCOLL_C_A = 0x6A7 + SYS___WCSCOLL_STD_A = 0x6A8 + SYS___WCSFTIME_A = 0x6D0 + SYS___WCSFTIME_STD_A = 0x6D1 + SYS___WCSXFRM_A = 0x6A9 + SYS___WCSXFRM_C_A = 0x6AA + SYS___WCSXFRM_STD_A = 0x6AB + SYS___WCTYPE_A = 0x6AD + SYS___W_GETMNTENT_A = 0x6F5 + SYS_____CCSIDTYPE_A = 0x6E6 + SYS_____CHATTR_A = 0x6E2 + SYS_____CSNAMETYPE_A = 0x6E7 + SYS_____OPEN_STAT_A = 0x6ED + SYS_____SPAWN2_A = 0x6D2 + SYS_____SPAWNP2_A = 0x6D3 + SYS_____TOCCSID_A = 0x6E4 + SYS_____TOCSNAME_A = 0x6E5 + SYS_ACL_FREE = 0x7FF + SYS_ACL_INIT = 0x7FE + SYS_FWIDE = 0x7DF + SYS_FWPRINTF = 0x7D1 + SYS_FWRITE = 0x07E + SYS_FWSCANF = 0x7D5 + SYS_GETCHAR = 0x07B + SYS_GETS = 0x07C + SYS_M_CREATE_LAYOUT = 0x7C9 + SYS_M_DESTROY_LAYOUT = 0x7CA + SYS_M_GETVALUES_LAYOUT = 0x7CB + SYS_M_SETVALUES_LAYOUT = 0x7CC + SYS_M_TRANSFORM_LAYOUT = 0x7CD + SYS_M_WTRANSFORM_LAYOUT = 0x7CE + SYS_PREAD = 0x7C7 + SYS_PUTC = 0x07D + SYS_PUTCHAR = 0x07A + SYS_PUTS = 0x07F + SYS_PWRITE = 0x7C8 + SYS_TOWCTRAN = 0x7D8 + SYS_TOWCTRANS = 0x7D8 + SYS_UNATEXIT = 0x7B5 + SYS_VFWPRINT = 0x7D3 + SYS_VFWPRINTF = 0x7D3 + SYS_VWPRINTF = 0x7D4 + SYS_WCTRANS = 0x7D7 + SYS_WPRINTF = 0x7D2 + SYS_WSCANF = 0x7D6 + SYS___ASCTIME_R_A = 0x7A1 + SYS___BASENAME_A = 0x7DC + SYS___BTOWC_A = 0x7E4 + SYS___CDUMP_A = 0x7B7 + SYS___CEE3DMP_A = 0x7B6 + SYS___CEILF_H = 0x7F4 + SYS___CEILL_H = 0x7F5 + SYS___CEIL_H = 0x7EA + SYS___CRYPT_A = 0x7BE + SYS___CSNAP_A = 0x7B8 + SYS___CTEST_A = 0x7B9 + SYS___CTIME_R_A = 0x7A2 + SYS___CTRACE_A = 0x7BA + SYS___DBM_OPEN_A = 0x7E6 + SYS___DIRNAME_A = 0x7DD + SYS___FABSF_H = 0x7FA + SYS___FABSL_H = 0x7FB + SYS___FABS_H = 0x7ED + SYS___FGETWC_A = 0x7AA + SYS___FGETWS_A = 0x7AD + SYS___FLOORF_H = 0x7F6 + SYS___FLOORL_H = 0x7F7 + SYS___FLOOR_H = 0x7EB + SYS___FPUTWC_A = 0x7A5 + SYS___FPUTWS_A = 0x7A8 + SYS___GETTIMEOFDAY_A = 0x7AE + SYS___GETWCHAR_A = 0x7AC + SYS___GETWC_A = 0x7AB + SYS___GLOB_A = 0x7DE + SYS___GMTIME_A = 0x7AF + SYS___GMTIME_R_A = 0x7B0 + SYS___INET_PTON_A = 0x7BC + SYS___J0_H = 0x7EE + SYS___J1_H = 0x7EF + SYS___JN_H = 0x7F0 + SYS___LOCALTIME_A = 0x7B1 + SYS___LOCALTIME_R_A = 0x7B2 + SYS___MALLOC24 = 0x7FC + SYS___MALLOC31 = 0x7FD + SYS___MKTIME_A = 0x7B3 + SYS___MODFF_H = 0x7F8 + SYS___MODFL_H = 0x7F9 + SYS___MODF_H = 0x7EC + SYS___OPENDIR_A = 0x7C2 + SYS___OSNAME = 0x7E0 + SYS___PUTWCHAR_A = 0x7A7 + SYS___PUTWC_A = 0x7A6 + SYS___READDIR_A = 0x7C3 + SYS___STRTOLL_A = 0x7A3 + SYS___STRTOULL_A = 0x7A4 + SYS___SYSLOG_A = 0x7BD + SYS___TZZNA = 0x7B4 + SYS___UNGETWC_A = 0x7A9 + SYS___UTIME_A = 0x7A0 + SYS___VFPRINTF2_A = 0x7E7 + SYS___VPRINTF2_A = 0x7E8 + SYS___VSPRINTF2_A = 0x7E9 + SYS___VSWPRNTF2_A = 0x7BB + SYS___WCSTOD_A = 0x7D9 + SYS___WCSTOL_A = 0x7DA + SYS___WCSTOUL_A = 0x7DB + SYS___WCTOB_A = 0x7E5 + SYS___Y0_H = 0x7F1 + SYS___Y1_H = 0x7F2 + SYS___YN_H = 0x7F3 + SYS_____OPENDIR2_A = 0x7BF + SYS_____OSNAME_A = 0x7E1 + SYS_____READDIR2_A = 0x7C0 + SYS_DLCLOSE = 0x8DF + SYS_DLERROR = 0x8E0 + SYS_DLOPEN = 0x8DD + SYS_DLSYM = 0x8DE + SYS_FLOCKFILE 
= 0x8D3 + SYS_FTRYLOCKFILE = 0x8D4 + SYS_FUNLOCKFILE = 0x8D5 + SYS_GETCHAR_UNLOCKED = 0x8D7 + SYS_GETC_UNLOCKED = 0x8D6 + SYS_PUTCHAR_UNLOCKED = 0x8D9 + SYS_PUTC_UNLOCKED = 0x8D8 + SYS_SNPRINTF = 0x8DA + SYS_VSNPRINTF = 0x8DB + SYS_WCSCSPN = 0x08B + SYS_WCSLEN = 0x08C + SYS_WCSNCAT = 0x08D + SYS_WCSNCMP = 0x08A + SYS_WCSNCPY = 0x08F + SYS_WCSSPN = 0x08E + SYS___ABSF_H = 0x8E7 + SYS___ABSL_H = 0x8E8 + SYS___ABS_H = 0x8E6 + SYS___ACOSF_H = 0x8EA + SYS___ACOSH_H = 0x8EC + SYS___ACOSL_H = 0x8EB + SYS___ACOS_H = 0x8E9 + SYS___ASINF_H = 0x8EE + SYS___ASINH_H = 0x8F0 + SYS___ASINL_H = 0x8EF + SYS___ASIN_H = 0x8ED + SYS___ATAN2F_H = 0x8F8 + SYS___ATAN2L_H = 0x8F9 + SYS___ATAN2_H = 0x8F7 + SYS___ATANF_H = 0x8F2 + SYS___ATANHF_H = 0x8F5 + SYS___ATANHL_H = 0x8F6 + SYS___ATANH_H = 0x8F4 + SYS___ATANL_H = 0x8F3 + SYS___ATAN_H = 0x8F1 + SYS___CBRT_H = 0x8FA + SYS___COPYSIGNF_H = 0x8FB + SYS___COPYSIGNL_H = 0x8FC + SYS___COSF_H = 0x8FE + SYS___COSL_H = 0x8FF + SYS___COS_H = 0x8FD + SYS___DLERROR_A = 0x8D2 + SYS___DLOPEN_A = 0x8D0 + SYS___DLSYM_A = 0x8D1 + SYS___GETUTXENT_A = 0x8C6 + SYS___GETUTXID_A = 0x8C7 + SYS___GETUTXLINE_A = 0x8C8 + SYS___ITOA = 0x8AA + SYS___ITOA_A = 0x8B0 + SYS___LE_CONDITION_TOKEN_BUILD = 0x8A5 + SYS___LE_MSG_ADD_INSERT = 0x8A6 + SYS___LE_MSG_GET = 0x8A7 + SYS___LE_MSG_GET_AND_WRITE = 0x8A8 + SYS___LE_MSG_WRITE = 0x8A9 + SYS___LLTOA = 0x8AE + SYS___LLTOA_A = 0x8B4 + SYS___LTOA = 0x8AC + SYS___LTOA_A = 0x8B2 + SYS___PUTCHAR_UNLOCKED_A = 0x8CC + SYS___PUTC_UNLOCKED_A = 0x8CB + SYS___PUTUTXLINE_A = 0x8C9 + SYS___RESET_EXCEPTION_HANDLER = 0x8E3 + SYS___REXEC_A = 0x8C4 + SYS___REXEC_AF_A = 0x8C5 + SYS___SET_EXCEPTION_HANDLER = 0x8E2 + SYS___SNPRINTF_A = 0x8CD + SYS___SUPERKILL = 0x8A4 + SYS___TCGETATTR_A = 0x8A1 + SYS___TCSETATTR_A = 0x8A2 + SYS___ULLTOA = 0x8AF + SYS___ULLTOA_A = 0x8B5 + SYS___ULTOA = 0x8AD + SYS___ULTOA_A = 0x8B3 + SYS___UTOA = 0x8AB + SYS___UTOA_A = 0x8B1 + SYS___VHM_EVENT = 0x8E4 + SYS___VSNPRINTF_A = 0x8CE + SYS_____GETENV_A = 0x8C3 + SYS_____UTMPXNAME_A = 0x8CA + SYS_CACOSH = 0x9A0 + SYS_CACOSHF = 0x9A3 + SYS_CACOSHL = 0x9A6 + SYS_CARG = 0x9A9 + SYS_CARGF = 0x9AC + SYS_CARGL = 0x9AF + SYS_CASIN = 0x9B2 + SYS_CASINF = 0x9B5 + SYS_CASINH = 0x9BB + SYS_CASINHF = 0x9BE + SYS_CASINHL = 0x9C1 + SYS_CASINL = 0x9B8 + SYS_CATAN = 0x9C4 + SYS_CATANF = 0x9C7 + SYS_CATANH = 0x9CD + SYS_CATANHF = 0x9D0 + SYS_CATANHL = 0x9D3 + SYS_CATANL = 0x9CA + SYS_CCOS = 0x9D6 + SYS_CCOSF = 0x9D9 + SYS_CCOSH = 0x9DF + SYS_CCOSHF = 0x9E2 + SYS_CCOSHL = 0x9E5 + SYS_CCOSL = 0x9DC + SYS_CEXP = 0x9E8 + SYS_CEXPF = 0x9EB + SYS_CEXPL = 0x9EE + SYS_CIMAG = 0x9F1 + SYS_CIMAGF = 0x9F4 + SYS_CIMAGL = 0x9F7 + SYS_CLOGF = 0x9FD + SYS_MEMCHR = 0x09B + SYS_MEMCMP = 0x09A + SYS_STRCOLL = 0x09C + SYS_STRNCMP = 0x09D + SYS_STRRCHR = 0x09F + SYS_STRXFRM = 0x09E + SYS___CACOSHF_B = 0x9A4 + SYS___CACOSHF_H = 0x9A5 + SYS___CACOSHL_B = 0x9A7 + SYS___CACOSHL_H = 0x9A8 + SYS___CACOSH_B = 0x9A1 + SYS___CACOSH_H = 0x9A2 + SYS___CARGF_B = 0x9AD + SYS___CARGF_H = 0x9AE + SYS___CARGL_B = 0x9B0 + SYS___CARGL_H = 0x9B1 + SYS___CARG_B = 0x9AA + SYS___CARG_H = 0x9AB + SYS___CASINF_B = 0x9B6 + SYS___CASINF_H = 0x9B7 + SYS___CASINHF_B = 0x9BF + SYS___CASINHF_H = 0x9C0 + SYS___CASINHL_B = 0x9C2 + SYS___CASINHL_H = 0x9C3 + SYS___CASINH_B = 0x9BC + SYS___CASINH_H = 0x9BD + SYS___CASINL_B = 0x9B9 + SYS___CASINL_H = 0x9BA + SYS___CASIN_B = 0x9B3 + SYS___CASIN_H = 0x9B4 + SYS___CATANF_B = 0x9C8 + SYS___CATANF_H = 0x9C9 + SYS___CATANHF_B = 0x9D1 + SYS___CATANHF_H = 0x9D2 + SYS___CATANHL_B = 0x9D4 + SYS___CATANHL_H = 0x9D5 + 
SYS___CATANH_B = 0x9CE + SYS___CATANH_H = 0x9CF + SYS___CATANL_B = 0x9CB + SYS___CATANL_H = 0x9CC + SYS___CATAN_B = 0x9C5 + SYS___CATAN_H = 0x9C6 + SYS___CCOSF_B = 0x9DA + SYS___CCOSF_H = 0x9DB + SYS___CCOSHF_B = 0x9E3 + SYS___CCOSHF_H = 0x9E4 + SYS___CCOSHL_B = 0x9E6 + SYS___CCOSHL_H = 0x9E7 + SYS___CCOSH_B = 0x9E0 + SYS___CCOSH_H = 0x9E1 + SYS___CCOSL_B = 0x9DD + SYS___CCOSL_H = 0x9DE + SYS___CCOS_B = 0x9D7 + SYS___CCOS_H = 0x9D8 + SYS___CEXPF_B = 0x9EC + SYS___CEXPF_H = 0x9ED + SYS___CEXPL_B = 0x9EF + SYS___CEXPL_H = 0x9F0 + SYS___CEXP_B = 0x9E9 + SYS___CEXP_H = 0x9EA + SYS___CIMAGF_B = 0x9F5 + SYS___CIMAGF_H = 0x9F6 + SYS___CIMAGL_B = 0x9F8 + SYS___CIMAGL_H = 0x9F9 + SYS___CIMAG_B = 0x9F2 + SYS___CIMAG_H = 0x9F3 + SYS___CLOG = 0x9FA + SYS___CLOGF_B = 0x9FE + SYS___CLOGF_H = 0x9FF + SYS___CLOG_B = 0x9FB + SYS___CLOG_H = 0x9FC + SYS_ISWCTYPE = 0x10C + SYS_ISWXDIGI = 0x10A + SYS_ISWXDIGIT = 0x10A + SYS_MBSINIT = 0x10F + SYS_TOWLOWER = 0x10D + SYS_TOWUPPER = 0x10E + SYS_WCTYPE = 0x10B + SYS_WCSSTR = 0x11B + SYS___RPMTCH = 0x11A + SYS_WCSTOD = 0x12E + SYS_WCSTOK = 0x12C + SYS_WCSTOL = 0x12D + SYS_WCSTOUL = 0x12F + SYS_FGETWC = 0x13C + SYS_FGETWS = 0x13D + SYS_FPUTWC = 0x13E + SYS_FPUTWS = 0x13F + SYS_REGERROR = 0x13B + SYS_REGFREE = 0x13A + SYS_COLLEQUIV = 0x14F + SYS_COLLTOSTR = 0x14E + SYS_ISMCCOLLEL = 0x14C + SYS_STRTOCOLL = 0x14D + SYS_DLLFREE = 0x16F + SYS_DLLQUERYFN = 0x16D + SYS_DLLQUERYVAR = 0x16E + SYS_GETMCCOLL = 0x16A + SYS_GETWMCCOLL = 0x16B + SYS___ERR2AD = 0x16C + SYS_CFSETOSPEED = 0x17A + SYS_CHDIR = 0x17B + SYS_CHMOD = 0x17C + SYS_CHOWN = 0x17D + SYS_CLOSE = 0x17E + SYS_CLOSEDIR = 0x17F + SYS_LOG = 0x017 + SYS_COSH = 0x018 + SYS_FCHMOD = 0x18A + SYS_FCHOWN = 0x18B + SYS_FCNTL = 0x18C + SYS_FILENO = 0x18D + SYS_FORK = 0x18E + SYS_FPATHCONF = 0x18F + SYS_GETLOGIN = 0x19A + SYS_GETPGRP = 0x19C + SYS_GETPID = 0x19D + SYS_GETPPID = 0x19E + SYS_GETPWNAM = 0x19F + SYS_TANH = 0x019 + SYS_W_GETMNTENT = 0x19B + SYS_POW = 0x020 + SYS_PTHREAD_SELF = 0x20A + SYS_PTHREAD_SETINTR = 0x20B + SYS_PTHREAD_SETINTRTYPE = 0x20C + SYS_PTHREAD_SETSPECIFIC = 0x20D + SYS_PTHREAD_TESTINTR = 0x20E + SYS_PTHREAD_YIELD = 0x20F + SYS_SQRT = 0x021 + SYS_FLOOR = 0x022 + SYS_J1 = 0x023 + SYS_WCSPBRK = 0x23F + SYS_BSEARCH = 0x24C + SYS_FABS = 0x024 + SYS_GETENV = 0x24A + SYS_LDIV = 0x24D + SYS_SYSTEM = 0x24B + SYS_FMOD = 0x025 + SYS___RETHROW = 0x25F + SYS___THROW = 0x25E + SYS_J0 = 0x026 + SYS_PUTENV = 0x26A + SYS___GETENV = 0x26F + SYS_SEMCTL = 0x27A + SYS_SEMGET = 0x27B + SYS_SEMOP = 0x27C + SYS_SHMAT = 0x27D + SYS_SHMCTL = 0x27E + SYS_SHMDT = 0x27F + SYS_YN = 0x027 + SYS_JN = 0x028 + SYS_SIGALTSTACK = 0x28A + SYS_SIGHOLD = 0x28B + SYS_SIGIGNORE = 0x28C + SYS_SIGINTERRUPT = 0x28D + SYS_SIGPAUSE = 0x28E + SYS_SIGRELSE = 0x28F + SYS_GETOPT = 0x29A + SYS_GETSUBOPT = 0x29D + SYS_LCHOWN = 0x29B + SYS_SETPGRP = 0x29E + SYS_TRUNCATE = 0x29C + SYS_Y0 = 0x029 + SYS___GDERR = 0x29F + SYS_ISALPHA = 0x030 + SYS_VFORK = 0x30F + SYS__LONGJMP = 0x30D + SYS__SETJMP = 0x30E + SYS_GLOB = 0x31A + SYS_GLOBFREE = 0x31B + SYS_ISALNUM = 0x031 + SYS_PUTW = 0x31C + SYS_SEEKDIR = 0x31D + SYS_TELLDIR = 0x31E + SYS_TEMPNAM = 0x31F + SYS_GETTIMEOFDAY_R = 0x32E + SYS_ISLOWER = 0x032 + SYS_LGAMMA = 0x32C + SYS_REMAINDER = 0x32A + SYS_SCALB = 0x32B + SYS_SYNC = 0x32F + SYS_TTYSLOT = 0x32D + SYS_ENDPROTOENT = 0x33A + SYS_ENDSERVENT = 0x33B + SYS_GETHOSTBYADDR = 0x33D + SYS_GETHOSTBYADDR_R = 0x33C + SYS_GETHOSTBYNAME = 0x33F + SYS_GETHOSTBYNAME_R = 0x33E + SYS_ISCNTRL = 0x033 + SYS_GETSERVBYNAME = 0x34A + SYS_GETSERVBYPORT = 0x34B + 
SYS_GETSERVENT = 0x34C + SYS_GETSOCKNAME = 0x34D + SYS_GETSOCKOPT = 0x34E + SYS_INET_ADDR = 0x34F + SYS_ISDIGIT = 0x034 + SYS_ISGRAPH = 0x035 + SYS_SELECT = 0x35B + SYS_SELECTEX = 0x35C + SYS_SEND = 0x35D + SYS_SENDTO = 0x35F + SYS_CHROOT = 0x36A + SYS_ISNAN = 0x36D + SYS_ISUPPER = 0x036 + SYS_ULIMIT = 0x36C + SYS_UTIMES = 0x36E + SYS_W_STATVFS = 0x36B + SYS___H_ERRNO = 0x36F + SYS_GRANTPT = 0x37A + SYS_ISPRINT = 0x037 + SYS_TCGETSID = 0x37C + SYS_UNLOCKPT = 0x37B + SYS___TCGETCP = 0x37D + SYS___TCSETCP = 0x37E + SYS___TCSETTABLES = 0x37F + SYS_ISPUNCT = 0x038 + SYS_NLIST = 0x38C + SYS___IPDBCS = 0x38D + SYS___IPDSPX = 0x38E + SYS___IPMSGC = 0x38F + SYS___STHOSTENT = 0x38B + SYS___STSERVENT = 0x38A + SYS_ISSPACE = 0x039 + SYS_COS = 0x040 + SYS_T_ALLOC = 0x40A + SYS_T_BIND = 0x40B + SYS_T_CLOSE = 0x40C + SYS_T_CONNECT = 0x40D + SYS_T_ERROR = 0x40E + SYS_T_FREE = 0x40F + SYS_TAN = 0x041 + SYS_T_RCVREL = 0x41A + SYS_T_RCVUDATA = 0x41B + SYS_T_RCVUDERR = 0x41C + SYS_T_SND = 0x41D + SYS_T_SNDDIS = 0x41E + SYS_T_SNDREL = 0x41F + SYS_GETPMSG = 0x42A + SYS_ISASTREAM = 0x42B + SYS_PUTMSG = 0x42C + SYS_PUTPMSG = 0x42D + SYS_SINH = 0x042 + SYS___ISPOSIXON = 0x42E + SYS___OPENMVSREL = 0x42F + SYS_ACOS = 0x043 + SYS_ATAN = 0x044 + SYS_ATAN2 = 0x045 + SYS_FTELL = 0x046 + SYS_FGETPOS = 0x047 + SYS_SOCK_DEBUG = 0x47A + SYS_SOCK_DO_TESTSTOR = 0x47D + SYS_TAKESOCKET = 0x47E + SYS___SERVER_INIT = 0x47F + SYS_FSEEK = 0x048 + SYS___IPHOST = 0x48B + SYS___IPNODE = 0x48C + SYS___SERVER_CLASSIFY_CREATE = 0x48D + SYS___SERVER_CLASSIFY_DESTROY = 0x48E + SYS___SERVER_CLASSIFY_RESET = 0x48F + SYS___SMF_RECORD = 0x48A + SYS_FSETPOS = 0x049 + SYS___FNWSA = 0x49B + SYS___SPAWN2 = 0x49D + SYS___SPAWNP2 = 0x49E + SYS_ATOF = 0x050 + SYS_PTHREAD_MUTEXATTR_GETPSHARED = 0x50A + SYS_PTHREAD_MUTEXATTR_SETPSHARED = 0x50B + SYS_PTHREAD_RWLOCK_DESTROY = 0x50C + SYS_PTHREAD_RWLOCK_INIT = 0x50D + SYS_PTHREAD_RWLOCK_RDLOCK = 0x50E + SYS_PTHREAD_RWLOCK_TRYRDLOCK = 0x50F + SYS_ATOI = 0x051 + SYS___FP_CLASS = 0x51D + SYS___FP_CLR_FLAG = 0x51A + SYS___FP_FINITE = 0x51E + SYS___FP_ISNAN = 0x51F + SYS___FP_RAISE_XCP = 0x51C + SYS___FP_READ_FLAG = 0x51B + SYS_RAND = 0x052 + SYS_SIGTIMEDWAIT = 0x52D + SYS_SIGWAITINFO = 0x52E + SYS___CHKBFP = 0x52F + SYS___FPC_RS = 0x52C + SYS___FPC_RW = 0x52A + SYS___FPC_SM = 0x52B + SYS_STRTOD = 0x053 + SYS_STRTOL = 0x054 + SYS_STRTOUL = 0x055 + SYS_MALLOC = 0x056 + SYS_SRAND = 0x057 + SYS_CALLOC = 0x058 + SYS_FREE = 0x059 + SYS___OSENV = 0x59F + SYS___W_PIOCTL = 0x59E + SYS_LONGJMP = 0x060 + SYS___FLOORF_B = 0x60A + SYS___FLOORL_B = 0x60B + SYS___FREXPF_B = 0x60C + SYS___FREXPL_B = 0x60D + SYS___LDEXPF_B = 0x60E + SYS___LDEXPL_B = 0x60F + SYS_SIGNAL = 0x061 + SYS___ATAN2F_B = 0x61A + SYS___ATAN2L_B = 0x61B + SYS___COSHF_B = 0x61C + SYS___COSHL_B = 0x61D + SYS___EXPF_B = 0x61E + SYS___EXPL_B = 0x61F + SYS_TMPNAM = 0x062 + SYS___ABSF_B = 0x62A + SYS___ABSL_B = 0x62C + SYS___ABS_B = 0x62B + SYS___FMODF_B = 0x62D + SYS___FMODL_B = 0x62E + SYS___MODFF_B = 0x62F + SYS_ATANL = 0x63A + SYS_CEILF = 0x63B + SYS_CEILL = 0x63C + SYS_COSF = 0x63D + SYS_COSHF = 0x63F + SYS_COSL = 0x63E + SYS_REMOVE = 0x063 + SYS_POWL = 0x64A + SYS_RENAME = 0x064 + SYS_SINF = 0x64B + SYS_SINHF = 0x64F + SYS_SINL = 0x64C + SYS_SQRTF = 0x64D + SYS_SQRTL = 0x64E + SYS_BTOWC = 0x65F + SYS_FREXPL = 0x65A + SYS_LDEXPF = 0x65B + SYS_LDEXPL = 0x65C + SYS_MODFF = 0x65D + SYS_MODFL = 0x65E + SYS_TMPFILE = 0x065 + SYS_FREOPEN = 0x066 + SYS___CHARMAP_INIT_A = 0x66E + SYS___GETHOSTBYADDR_R_A = 0x66C + SYS___GETHOSTBYNAME_A = 0x66A + 
SYS___GETHOSTBYNAME_R_A = 0x66D + SYS___MBLEN_A = 0x66F + SYS___RES_INIT_A = 0x66B + SYS_FCLOSE = 0x067 + SYS___GETGRGID_R_A = 0x67D + SYS___WCSTOMBS_A = 0x67A + SYS___WCSTOMBS_STD_A = 0x67B + SYS___WCSWIDTH_A = 0x67C + SYS___WCSWIDTH_ASIA = 0x67F + SYS___WCSWIDTH_STD_A = 0x67E + SYS_FFLUSH = 0x068 + SYS___GETLOGIN_R_A = 0x68E + SYS___GETPWNAM_R_A = 0x68C + SYS___GETPWUID_R_A = 0x68D + SYS___TTYNAME_R_A = 0x68F + SYS___WCWIDTH_ASIA = 0x68B + SYS___WCWIDTH_STD_A = 0x68A + SYS_FOPEN = 0x069 + SYS___REGEXEC_A = 0x69A + SYS___REGEXEC_STD_A = 0x69B + SYS___REGFREE_A = 0x69C + SYS___REGFREE_STD_A = 0x69D + SYS___STRCOLL_A = 0x69E + SYS___STRCOLL_C_A = 0x69F + SYS_SCANF = 0x070 + SYS___A64L_A = 0x70C + SYS___ECVT_A = 0x70D + SYS___FCVT_A = 0x70E + SYS___GCVT_A = 0x70F + SYS___STRTOUL_A = 0x70A + SYS_____AE_CORRESTBL_QUERY_A = 0x70B + SYS_SPRINTF = 0x071 + SYS___ACCESS_A = 0x71F + SYS___CATOPEN_A = 0x71E + SYS___GETOPT_A = 0x71D + SYS___REALPATH_A = 0x71A + SYS___SETENV_A = 0x71B + SYS___SYSTEM_A = 0x71C + SYS_FGETC = 0x072 + SYS___GAI_STRERROR_A = 0x72F + SYS___RMDIR_A = 0x72A + SYS___STATVFS_A = 0x72B + SYS___SYMLINK_A = 0x72C + SYS___TRUNCATE_A = 0x72D + SYS___UNLINK_A = 0x72E + SYS_VFPRINTF = 0x073 + SYS___ISSPACE_A = 0x73A + SYS___ISUPPER_A = 0x73B + SYS___ISWALNUM_A = 0x73F + SYS___ISXDIGIT_A = 0x73C + SYS___TOLOWER_A = 0x73D + SYS___TOUPPER_A = 0x73E + SYS_VPRINTF = 0x074 + SYS___CONFSTR_A = 0x74B + SYS___FDOPEN_A = 0x74E + SYS___FLDATA_A = 0x74F + SYS___FTOK_A = 0x74C + SYS___ISWXDIGIT_A = 0x74A + SYS___MKTEMP_A = 0x74D + SYS_VSPRINTF = 0x075 + SYS___GETGRGID_A = 0x75A + SYS___GETGRNAM_A = 0x75B + SYS___GETGROUPSBYNAME_A = 0x75C + SYS___GETHOSTENT_A = 0x75D + SYS___GETHOSTNAME_A = 0x75E + SYS___GETLOGIN_A = 0x75F + SYS_GETC = 0x076 + SYS___CREATEWORKUNIT_A = 0x76A + SYS___CTERMID_A = 0x76B + SYS___FMTMSG_A = 0x76C + SYS___INITGROUPS_A = 0x76D + SYS___MSGRCV_A = 0x76F + SYS_____LOGIN_A = 0x76E + SYS_FGETS = 0x077 + SYS___STRCASECMP_A = 0x77B + SYS___STRNCASECMP_A = 0x77C + SYS___TTYNAME_A = 0x77D + SYS___UNAME_A = 0x77E + SYS___UTIMES_A = 0x77F + SYS_____SERVER_PWU_A = 0x77A + SYS_FPUTC = 0x078 + SYS___CREAT_O_A = 0x78E + SYS___ENVNA = 0x78F + SYS___FREAD_A = 0x78A + SYS___FWRITE_A = 0x78B + SYS___ISASCII = 0x78D + SYS___OPEN_O_A = 0x78C + SYS_FPUTS = 0x079 + SYS___ASCTIME_A = 0x79C + SYS___CTIME_A = 0x79D + SYS___GETDATE_A = 0x79E + SYS___GETSERVBYPORT_A = 0x79A + SYS___GETSERVENT_A = 0x79B + SYS___TZSET_A = 0x79F + SYS_ACL_FROM_TEXT = 0x80C + SYS_ACL_SET_FD = 0x80A + SYS_ACL_SET_FILE = 0x80B + SYS_ACL_SORT = 0x80E + SYS_ACL_TO_TEXT = 0x80D + SYS_UNGETC = 0x080 + SYS___SHUTDOWN_REGISTRATION = 0x80F + SYS_FREAD = 0x081 + SYS_FREEADDRINFO = 0x81A + SYS_GAI_STRERROR = 0x81B + SYS_REXEC_AF = 0x81C + SYS___DYNALLOC_A = 0x81F + SYS___POE = 0x81D + SYS_WCSTOMBS = 0x082 + SYS___INET_ADDR_A = 0x82F + SYS___NLIST_A = 0x82A + SYS_____TCGETCP_A = 0x82B + SYS_____TCSETCP_A = 0x82C + SYS_____W_PIOCTL_A = 0x82E + SYS_MBTOWC = 0x083 + SYS___CABEND = 0x83D + SYS___LE_CIB_GET = 0x83E + SYS___RECVMSG_A = 0x83B + SYS___SENDMSG_A = 0x83A + SYS___SET_LAA_FOR_JIT = 0x83F + SYS_____LCHATTR_A = 0x83C + SYS_WCTOMB = 0x084 + SYS___CBRTL_B = 0x84A + SYS___COPYSIGNF_B = 0x84B + SYS___COPYSIGNL_B = 0x84C + SYS___COTANF_B = 0x84D + SYS___COTANL_B = 0x84F + SYS___COTAN_B = 0x84E + SYS_MBSTOWCS = 0x085 + SYS___LOG1PL_B = 0x85A + SYS___LOG2F_B = 0x85B + SYS___LOG2L_B = 0x85D + SYS___LOG2_B = 0x85C + SYS___REMAINDERF_B = 0x85E + SYS___REMAINDERL_B = 0x85F + SYS_ACOSHF = 0x86E + SYS_ACOSHL = 0x86F + SYS_WCSCPY = 0x086 + 
SYS___ERFCF_B = 0x86D + SYS___ERFF_B = 0x86C + SYS___LROUNDF_B = 0x86A + SYS___LROUND_B = 0x86B + SYS_COTANL = 0x87A + SYS_EXP2F = 0x87B + SYS_EXP2L = 0x87C + SYS_EXPM1F = 0x87D + SYS_EXPM1L = 0x87E + SYS_FDIMF = 0x87F + SYS_WCSCAT = 0x087 + SYS___COTANL = 0x87A + SYS_REMAINDERF = 0x88A + SYS_REMAINDERL = 0x88B + SYS_REMAINDF = 0x88A + SYS_REMAINDL = 0x88B + SYS_REMQUO = 0x88D + SYS_REMQUOF = 0x88C + SYS_REMQUOL = 0x88E + SYS_TGAMMAF = 0x88F + SYS_WCSCHR = 0x088 + SYS_ERFCF = 0x89B + SYS_ERFCL = 0x89C + SYS_ERFL = 0x89A + SYS_EXP2 = 0x89E + SYS_WCSCMP = 0x089 + SYS___EXP2_B = 0x89D + SYS___FAR_JUMP = 0x89F + SYS_ABS = 0x090 + SYS___ERFCL_H = 0x90A + SYS___EXPF_H = 0x90C + SYS___EXPL_H = 0x90D + SYS___EXPM1_H = 0x90E + SYS___EXP_H = 0x90B + SYS___FDIM_H = 0x90F + SYS_DIV = 0x091 + SYS___LOG2F_H = 0x91F + SYS___LOG2_H = 0x91E + SYS___LOGB_H = 0x91D + SYS___LOGF_H = 0x91B + SYS___LOGL_H = 0x91C + SYS___LOG_H = 0x91A + SYS_LABS = 0x092 + SYS___POWL_H = 0x92A + SYS___REMAINDER_H = 0x92B + SYS___RINT_H = 0x92C + SYS___SCALB_H = 0x92D + SYS___SINF_H = 0x92F + SYS___SIN_H = 0x92E + SYS_STRNCPY = 0x093 + SYS___TANHF_H = 0x93B + SYS___TANHL_H = 0x93C + SYS___TANH_H = 0x93A + SYS___TGAMMAF_H = 0x93E + SYS___TGAMMA_H = 0x93D + SYS___TRUNC_H = 0x93F + SYS_MEMCPY = 0x094 + SYS_VFWSCANF = 0x94A + SYS_VSWSCANF = 0x94E + SYS_VWSCANF = 0x94C + SYS_INET6_RTH_ADD = 0x95D + SYS_INET6_RTH_INIT = 0x95C + SYS_INET6_RTH_REVERSE = 0x95E + SYS_INET6_RTH_SEGMENTS = 0x95F + SYS_INET6_RTH_SPACE = 0x95B + SYS_MEMMOVE = 0x095 + SYS_WCSTOLD = 0x95A + SYS_STRCPY = 0x096 + SYS_STRCMP = 0x097 + SYS_CABS = 0x98E + SYS_STRCAT = 0x098 + SYS___CABS_B = 0x98F + SYS___POW_II = 0x98A + SYS___POW_II_B = 0x98B + SYS___POW_II_H = 0x98C + SYS_CACOSF = 0x99A + SYS_CACOSL = 0x99D + SYS_STRNCAT = 0x099 + SYS___CACOSF_B = 0x99B + SYS___CACOSF_H = 0x99C + SYS___CACOSL_B = 0x99E + SYS___CACOSL_H = 0x99F + SYS_ISWALPHA = 0x100 + SYS_ISWBLANK = 0x101 + SYS___ISWBLK = 0x101 + SYS_ISWCNTRL = 0x102 + SYS_ISWDIGIT = 0x103 + SYS_ISWGRAPH = 0x104 + SYS_ISWLOWER = 0x105 + SYS_ISWPRINT = 0x106 + SYS_ISWPUNCT = 0x107 + SYS_ISWSPACE = 0x108 + SYS_ISWUPPER = 0x109 + SYS_WCTOB = 0x110 + SYS_MBRLEN = 0x111 + SYS_MBRTOWC = 0x112 + SYS_MBSRTOWC = 0x113 + SYS_MBSRTOWCS = 0x113 + SYS_WCRTOMB = 0x114 + SYS_WCSRTOMB = 0x115 + SYS_WCSRTOMBS = 0x115 + SYS___CSID = 0x116 + SYS___WCSID = 0x117 + SYS_STRPTIME = 0x118 + SYS___STRPTM = 0x118 + SYS_STRFMON = 0x119 + SYS_WCSCOLL = 0x130 + SYS_WCSXFRM = 0x131 + SYS_WCSWIDTH = 0x132 + SYS_WCWIDTH = 0x133 + SYS_WCSFTIME = 0x134 + SYS_SWPRINTF = 0x135 + SYS_VSWPRINT = 0x136 + SYS_VSWPRINTF = 0x136 + SYS_SWSCANF = 0x137 + SYS_REGCOMP = 0x138 + SYS_REGEXEC = 0x139 + SYS_GETWC = 0x140 + SYS_GETWCHAR = 0x141 + SYS_PUTWC = 0x142 + SYS_PUTWCHAR = 0x143 + SYS_UNGETWC = 0x144 + SYS_ICONV_OPEN = 0x145 + SYS_ICONV = 0x146 + SYS_ICONV_CLOSE = 0x147 + SYS_COLLRANGE = 0x150 + SYS_CCLASS = 0x151 + SYS_COLLORDER = 0x152 + SYS___DEMANGLE = 0x154 + SYS_FDOPEN = 0x155 + SYS___ERRNO = 0x156 + SYS___ERRNO2 = 0x157 + SYS___TERROR = 0x158 + SYS_MAXCOLL = 0x169 + SYS_DLLLOAD = 0x170 + SYS__EXIT = 0x174 + SYS_ACCESS = 0x175 + SYS_ALARM = 0x176 + SYS_CFGETISPEED = 0x177 + SYS_CFGETOSPEED = 0x178 + SYS_CFSETISPEED = 0x179 + SYS_CREAT = 0x180 + SYS_CTERMID = 0x181 + SYS_DUP = 0x182 + SYS_DUP2 = 0x183 + SYS_EXECL = 0x184 + SYS_EXECLE = 0x185 + SYS_EXECLP = 0x186 + SYS_EXECV = 0x187 + SYS_EXECVE = 0x188 + SYS_EXECVP = 0x189 + SYS_FSTAT = 0x190 + SYS_FSYNC = 0x191 + SYS_FTRUNCATE = 0x192 + SYS_GETCWD = 0x193 + SYS_GETEGID = 0x194 + SYS_GETEUID = 0x195 + 
SYS_GETGID = 0x196 + SYS_GETGRGID = 0x197 + SYS_GETGRNAM = 0x198 + SYS_GETGROUPS = 0x199 + SYS_PTHREAD_MUTEXATTR_DESTROY = 0x200 + SYS_PTHREAD_MUTEXATTR_SETKIND_NP = 0x201 + SYS_PTHREAD_MUTEXATTR_GETKIND_NP = 0x202 + SYS_PTHREAD_MUTEX_INIT = 0x203 + SYS_PTHREAD_MUTEX_DESTROY = 0x204 + SYS_PTHREAD_MUTEX_LOCK = 0x205 + SYS_PTHREAD_MUTEX_TRYLOCK = 0x206 + SYS_PTHREAD_MUTEX_UNLOCK = 0x207 + SYS_PTHREAD_ONCE = 0x209 + SYS_TW_OPEN = 0x210 + SYS_TW_FCNTL = 0x211 + SYS_PTHREAD_JOIN_D4_NP = 0x212 + SYS_PTHREAD_CONDATTR_SETKIND_NP = 0x213 + SYS_PTHREAD_CONDATTR_GETKIND_NP = 0x214 + SYS_EXTLINK_NP = 0x215 + SYS___PASSWD = 0x216 + SYS_SETGROUPS = 0x217 + SYS_INITGROUPS = 0x218 + SYS_WCSRCHR = 0x240 + SYS_SVC99 = 0x241 + SYS___SVC99 = 0x241 + SYS_WCSWCS = 0x242 + SYS_LOCALECO = 0x243 + SYS_LOCALECONV = 0x243 + SYS___LIBREL = 0x244 + SYS_RELEASE = 0x245 + SYS___RLSE = 0x245 + SYS_FLOCATE = 0x246 + SYS___FLOCT = 0x246 + SYS_FDELREC = 0x247 + SYS___FDLREC = 0x247 + SYS_FETCH = 0x248 + SYS___FETCH = 0x248 + SYS_QSORT = 0x249 + SYS___CLEANUPCATCH = 0x260 + SYS___CATCHMATCH = 0x261 + SYS___CLEAN2UPCATCH = 0x262 + SYS_GETPRIORITY = 0x270 + SYS_NICE = 0x271 + SYS_SETPRIORITY = 0x272 + SYS_GETITIMER = 0x273 + SYS_SETITIMER = 0x274 + SYS_MSGCTL = 0x275 + SYS_MSGGET = 0x276 + SYS_MSGRCV = 0x277 + SYS_MSGSND = 0x278 + SYS_MSGXRCV = 0x279 + SYS___MSGXR = 0x279 + SYS_SHMGET = 0x280 + SYS___GETIPC = 0x281 + SYS_SETGRENT = 0x282 + SYS_GETGRENT = 0x283 + SYS_ENDGRENT = 0x284 + SYS_SETPWENT = 0x285 + SYS_GETPWENT = 0x286 + SYS_ENDPWENT = 0x287 + SYS_BSD_SIGNAL = 0x288 + SYS_KILLPG = 0x289 + SYS_SIGSET = 0x290 + SYS_SIGSTACK = 0x291 + SYS_GETRLIMIT = 0x292 + SYS_SETRLIMIT = 0x293 + SYS_GETRUSAGE = 0x294 + SYS_MMAP = 0x295 + SYS_MPROTECT = 0x296 + SYS_MSYNC = 0x297 + SYS_MUNMAP = 0x298 + SYS_CONFSTR = 0x299 + SYS___NDMTRM = 0x300 + SYS_FTOK = 0x301 + SYS_BASENAME = 0x302 + SYS_DIRNAME = 0x303 + SYS_GETDTABLESIZE = 0x304 + SYS_MKSTEMP = 0x305 + SYS_MKTEMP = 0x306 + SYS_NFTW = 0x307 + SYS_GETWD = 0x308 + SYS_LOCKF = 0x309 + SYS_WORDEXP = 0x310 + SYS_WORDFREE = 0x311 + SYS_GETPGID = 0x312 + SYS_GETSID = 0x313 + SYS___UTMPXNAME = 0x314 + SYS_CUSERID = 0x315 + SYS_GETPASS = 0x316 + SYS_FNMATCH = 0x317 + SYS_FTW = 0x318 + SYS_GETW = 0x319 + SYS_ACOSH = 0x320 + SYS_ASINH = 0x321 + SYS_ATANH = 0x322 + SYS_CBRT = 0x323 + SYS_EXPM1 = 0x324 + SYS_ILOGB = 0x325 + SYS_LOGB = 0x326 + SYS_LOG1P = 0x327 + SYS_NEXTAFTER = 0x328 + SYS_RINT = 0x329 + SYS_SPAWN = 0x330 + SYS_SPAWNP = 0x331 + SYS_GETLOGIN_UU = 0x332 + SYS_ECVT = 0x333 + SYS_FCVT = 0x334 + SYS_GCVT = 0x335 + SYS_ACCEPT = 0x336 + SYS_BIND = 0x337 + SYS_CONNECT = 0x338 + SYS_ENDHOSTENT = 0x339 + SYS_GETHOSTENT = 0x340 + SYS_GETHOSTID = 0x341 + SYS_GETHOSTNAME = 0x342 + SYS_GETNETBYADDR = 0x343 + SYS_GETNETBYNAME = 0x344 + SYS_GETNETENT = 0x345 + SYS_GETPEERNAME = 0x346 + SYS_GETPROTOBYNAME = 0x347 + SYS_GETPROTOBYNUMBER = 0x348 + SYS_GETPROTOENT = 0x349 + SYS_INET_LNAOF = 0x350 + SYS_INET_MAKEADDR = 0x351 + SYS_INET_NETOF = 0x352 + SYS_INET_NETWORK = 0x353 + SYS_INET_NTOA = 0x354 + SYS_IOCTL = 0x355 + SYS_LISTEN = 0x356 + SYS_READV = 0x357 + SYS_RECV = 0x358 + SYS_RECVFROM = 0x359 + SYS_SETHOSTENT = 0x360 + SYS_SETNETENT = 0x361 + SYS_SETPEER = 0x362 + SYS_SETPROTOENT = 0x363 + SYS_SETSERVENT = 0x364 + SYS_SETSOCKOPT = 0x365 + SYS_SHUTDOWN = 0x366 + SYS_SOCKET = 0x367 + SYS_SOCKETPAIR = 0x368 + SYS_WRITEV = 0x369 + SYS_ENDNETENT = 0x370 + SYS_CLOSELOG = 0x371 + SYS_OPENLOG = 0x372 + SYS_SETLOGMASK = 0x373 + SYS_SYSLOG = 0x374 + SYS_PTSNAME = 0x375 + SYS_SETREUID = 0x376 + 
SYS_SETREGID = 0x377 + SYS_REALPATH = 0x378 + SYS___SIGNGAM = 0x379 + SYS_POLL = 0x380 + SYS_REXEC = 0x381 + SYS___ISASCII2 = 0x382 + SYS___TOASCII2 = 0x383 + SYS_CHPRIORITY = 0x384 + SYS_PTHREAD_ATTR_SETSYNCTYPE_NP = 0x385 + SYS_PTHREAD_ATTR_GETSYNCTYPE_NP = 0x386 + SYS_PTHREAD_SET_LIMIT_NP = 0x387 + SYS___STNETENT = 0x388 + SYS___STPROTOENT = 0x389 + SYS___SELECT1 = 0x390 + SYS_PTHREAD_SECURITY_NP = 0x391 + SYS___CHECK_RESOURCE_AUTH_NP = 0x392 + SYS___CONVERT_ID_NP = 0x393 + SYS___OPENVMREL = 0x394 + SYS_WMEMCHR = 0x395 + SYS_WMEMCMP = 0x396 + SYS_WMEMCPY = 0x397 + SYS_WMEMMOVE = 0x398 + SYS_WMEMSET = 0x399 + SYS___FPUTWC = 0x400 + SYS___PUTWC = 0x401 + SYS___PWCHAR = 0x402 + SYS___WCSFTM = 0x403 + SYS___WCSTOK = 0x404 + SYS___WCWDTH = 0x405 + SYS_T_ACCEPT = 0x409 + SYS_T_GETINFO = 0x410 + SYS_T_GETPROTADDR = 0x411 + SYS_T_GETSTATE = 0x412 + SYS_T_LISTEN = 0x413 + SYS_T_LOOK = 0x414 + SYS_T_OPEN = 0x415 + SYS_T_OPTMGMT = 0x416 + SYS_T_RCV = 0x417 + SYS_T_RCVCONNECT = 0x418 + SYS_T_RCVDIS = 0x419 + SYS_T_SNDUDATA = 0x420 + SYS_T_STRERROR = 0x421 + SYS_T_SYNC = 0x422 + SYS_T_UNBIND = 0x423 + SYS___T_ERRNO = 0x424 + SYS___RECVMSG2 = 0x425 + SYS___SENDMSG2 = 0x426 + SYS_FATTACH = 0x427 + SYS_FDETACH = 0x428 + SYS_GETMSG = 0x429 + SYS_GETCONTEXT = 0x430 + SYS_SETCONTEXT = 0x431 + SYS_MAKECONTEXT = 0x432 + SYS_SWAPCONTEXT = 0x433 + SYS_PTHREAD_GETSPECIFIC_D8_NP = 0x434 + SYS_GETCLIENTID = 0x470 + SYS___GETCLIENTID = 0x471 + SYS_GETSTABLESIZE = 0x472 + SYS_GETIBMOPT = 0x473 + SYS_GETIBMSOCKOPT = 0x474 + SYS_GIVESOCKET = 0x475 + SYS_IBMSFLUSH = 0x476 + SYS_MAXDESC = 0x477 + SYS_SETIBMOPT = 0x478 + SYS_SETIBMSOCKOPT = 0x479 + SYS___SERVER_PWU = 0x480 + SYS_PTHREAD_TAG_NP = 0x481 + SYS___CONSOLE = 0x482 + SYS___WSINIT = 0x483 + SYS___IPTCPN = 0x489 + SYS___SERVER_CLASSIFY = 0x490 + SYS___HEAPRPT = 0x496 + SYS___ISBFP = 0x500 + SYS___FP_CAST = 0x501 + SYS___CERTIFICATE = 0x502 + SYS_SEND_FILE = 0x503 + SYS_AIO_CANCEL = 0x504 + SYS_AIO_ERROR = 0x505 + SYS_AIO_READ = 0x506 + SYS_AIO_RETURN = 0x507 + SYS_AIO_SUSPEND = 0x508 + SYS_AIO_WRITE = 0x509 + SYS_PTHREAD_RWLOCK_TRYWRLOCK = 0x510 + SYS_PTHREAD_RWLOCK_UNLOCK = 0x511 + SYS_PTHREAD_RWLOCK_WRLOCK = 0x512 + SYS_PTHREAD_RWLOCKATTR_GETPSHARED = 0x513 + SYS_PTHREAD_RWLOCKATTR_SETPSHARED = 0x514 + SYS_PTHREAD_RWLOCKATTR_INIT = 0x515 + SYS_PTHREAD_RWLOCKATTR_DESTROY = 0x516 + SYS___CTTBL = 0x517 + SYS_PTHREAD_MUTEXATTR_SETTYPE = 0x518 + SYS_PTHREAD_MUTEXATTR_GETTYPE = 0x519 + SYS___FP_UNORDERED = 0x520 + SYS___FP_READ_RND = 0x521 + SYS___FP_READ_RND_B = 0x522 + SYS___FP_SWAP_RND = 0x523 + SYS___FP_SWAP_RND_B = 0x524 + SYS___FP_LEVEL = 0x525 + SYS___FP_BTOH = 0x526 + SYS___FP_HTOB = 0x527 + SYS___FPC_RD = 0x528 + SYS___FPC_WR = 0x529 + SYS_PTHREAD_SETCANCELTYPE = 0x600 + SYS_PTHREAD_TESTCANCEL = 0x601 + SYS___ATANF_B = 0x602 + SYS___ATANL_B = 0x603 + SYS___CEILF_B = 0x604 + SYS___CEILL_B = 0x605 + SYS___COSF_B = 0x606 + SYS___COSL_B = 0x607 + SYS___FABSF_B = 0x608 + SYS___FABSL_B = 0x609 + SYS___SINF_B = 0x610 + SYS___SINL_B = 0x611 + SYS___TANF_B = 0x612 + SYS___TANL_B = 0x613 + SYS___TANHF_B = 0x614 + SYS___TANHL_B = 0x615 + SYS___ACOSF_B = 0x616 + SYS___ACOSL_B = 0x617 + SYS___ASINF_B = 0x618 + SYS___ASINL_B = 0x619 + SYS___LOGF_B = 0x620 + SYS___LOGL_B = 0x621 + SYS___LOG10F_B = 0x622 + SYS___LOG10L_B = 0x623 + SYS___POWF_B = 0x624 + SYS___POWL_B = 0x625 + SYS___SINHF_B = 0x626 + SYS___SINHL_B = 0x627 + SYS___SQRTF_B = 0x628 + SYS___SQRTL_B = 0x629 + SYS___MODFL_B = 0x630 + SYS_ABSF = 0x631 + SYS_ABSL = 0x632 + SYS_ACOSF = 0x633 + SYS_ACOSL = 0x634 + 
SYS_ASINF = 0x635 + SYS_ASINL = 0x636 + SYS_ATAN2F = 0x637 + SYS_ATAN2L = 0x638 + SYS_ATANF = 0x639 + SYS_COSHL = 0x640 + SYS_EXPF = 0x641 + SYS_EXPL = 0x642 + SYS_TANHF = 0x643 + SYS_TANHL = 0x644 + SYS_LOG10F = 0x645 + SYS_LOG10L = 0x646 + SYS_LOGF = 0x647 + SYS_LOGL = 0x648 + SYS_POWF = 0x649 + SYS_SINHL = 0x650 + SYS_TANF = 0x651 + SYS_TANL = 0x652 + SYS_FABSF = 0x653 + SYS_FABSL = 0x654 + SYS_FLOORF = 0x655 + SYS_FLOORL = 0x656 + SYS_FMODF = 0x657 + SYS_FMODL = 0x658 + SYS_FREXPF = 0x659 + SYS___CHATTR = 0x660 + SYS___FCHATTR = 0x661 + SYS___TOCCSID = 0x662 + SYS___CSNAMETYPE = 0x663 + SYS___TOCSNAME = 0x664 + SYS___CCSIDTYPE = 0x665 + SYS___AE_CORRESTBL_QUERY = 0x666 + SYS___AE_AUTOCONVERT_STATE = 0x667 + SYS_DN_FIND = 0x668 + SYS___GETHOSTBYADDR_A = 0x669 + SYS___MBLEN_SB_A = 0x670 + SYS___MBLEN_STD_A = 0x671 + SYS___MBLEN_UTF = 0x672 + SYS___MBSTOWCS_A = 0x673 + SYS___MBSTOWCS_STD_A = 0x674 + SYS___MBTOWC_A = 0x675 + SYS___MBTOWC_ISO1 = 0x676 + SYS___MBTOWC_SBCS = 0x677 + SYS___MBTOWC_MBCS = 0x678 + SYS___MBTOWC_UTF = 0x679 + SYS___CSID_A = 0x680 + SYS___CSID_STD_A = 0x681 + SYS___WCSID_A = 0x682 + SYS___WCSID_STD_A = 0x683 + SYS___WCTOMB_A = 0x684 + SYS___WCTOMB_ISO1 = 0x685 + SYS___WCTOMB_STD_A = 0x686 + SYS___WCTOMB_UTF = 0x687 + SYS___WCWIDTH_A = 0x688 + SYS___GETGRNAM_R_A = 0x689 + SYS___READDIR_R_A = 0x690 + SYS___E2A_S = 0x691 + SYS___FNMATCH_A = 0x692 + SYS___FNMATCH_C_A = 0x693 + SYS___EXECL_A = 0x694 + SYS___FNMATCH_STD_A = 0x695 + SYS___REGCOMP_A = 0x696 + SYS___REGCOMP_STD_A = 0x697 + SYS___REGERROR_A = 0x698 + SYS___REGERROR_STD_A = 0x699 + SYS___SWPRINTF_A = 0x700 + SYS___FSCANF_A = 0x701 + SYS___SCANF_A = 0x702 + SYS___SSCANF_A = 0x703 + SYS___SWSCANF_A = 0x704 + SYS___ATOF_A = 0x705 + SYS___ATOI_A = 0x706 + SYS___ATOL_A = 0x707 + SYS___STRTOD_A = 0x708 + SYS___STRTOL_A = 0x709 + SYS___L64A_A = 0x710 + SYS___STRERROR_A = 0x711 + SYS___PERROR_A = 0x712 + SYS___FETCH_A = 0x713 + SYS___GETENV_A = 0x714 + SYS___MKSTEMP_A = 0x717 + SYS___PTSNAME_A = 0x718 + SYS___PUTENV_A = 0x719 + SYS___CHDIR_A = 0x720 + SYS___CHOWN_A = 0x721 + SYS___CHROOT_A = 0x722 + SYS___GETCWD_A = 0x723 + SYS___GETWD_A = 0x724 + SYS___LCHOWN_A = 0x725 + SYS___LINK_A = 0x726 + SYS___PATHCONF_A = 0x727 + SYS___IF_NAMEINDEX_A = 0x728 + SYS___READLINK_A = 0x729 + SYS___EXTLINK_NP_A = 0x730 + SYS___ISALNUM_A = 0x731 + SYS___ISALPHA_A = 0x732 + SYS___A2E_S = 0x733 + SYS___ISCNTRL_A = 0x734 + SYS___ISDIGIT_A = 0x735 + SYS___ISGRAPH_A = 0x736 + SYS___ISLOWER_A = 0x737 + SYS___ISPRINT_A = 0x738 + SYS___ISPUNCT_A = 0x739 + SYS___ISWALPHA_A = 0x740 + SYS___A2E_L = 0x741 + SYS___ISWCNTRL_A = 0x742 + SYS___ISWDIGIT_A = 0x743 + SYS___ISWGRAPH_A = 0x744 + SYS___ISWLOWER_A = 0x745 + SYS___ISWPRINT_A = 0x746 + SYS___ISWPUNCT_A = 0x747 + SYS___ISWSPACE_A = 0x748 + SYS___ISWUPPER_A = 0x749 + SYS___REMOVE_A = 0x750 + SYS___RENAME_A = 0x751 + SYS___TMPNAM_A = 0x752 + SYS___FOPEN_A = 0x753 + SYS___FREOPEN_A = 0x754 + SYS___CUSERID_A = 0x755 + SYS___POPEN_A = 0x756 + SYS___TEMPNAM_A = 0x757 + SYS___FTW_A = 0x758 + SYS___GETGRENT_A = 0x759 + SYS___INET_NTOP_A = 0x760 + SYS___GETPASS_A = 0x761 + SYS___GETPWENT_A = 0x762 + SYS___GETPWNAM_A = 0x763 + SYS___GETPWUID_A = 0x764 + SYS_____CHECK_RESOURCE_AUTH_NP_A = 0x765 + SYS___CHECKSCHENV_A = 0x766 + SYS___CONNECTSERVER_A = 0x767 + SYS___CONNECTWORKMGR_A = 0x768 + SYS_____CONSOLE_A = 0x769 + SYS___MSGSND_A = 0x770 + SYS___MSGXRCV_A = 0x771 + SYS___NFTW_A = 0x772 + SYS_____PASSWD_A = 0x773 + SYS___PTHREAD_SECURITY_NP_A = 0x774 + SYS___QUERYMETRICS_A = 0x775 + SYS___QUERYSCHENV = 
0x776 + SYS___READV_A = 0x777 + SYS_____SERVER_CLASSIFY_A = 0x778 + SYS_____SERVER_INIT_A = 0x779 + SYS___W_GETPSENT_A = 0x780 + SYS___WRITEV_A = 0x781 + SYS___W_STATFS_A = 0x782 + SYS___W_STATVFS_A = 0x783 + SYS___FPUTC_A = 0x784 + SYS___PUTCHAR_A = 0x785 + SYS___PUTS_A = 0x786 + SYS___FGETS_A = 0x787 + SYS___GETS_A = 0x788 + SYS___FPUTS_A = 0x789 + SYS___PUTC_A = 0x790 + SYS___AE_THREAD_SETMODE = 0x791 + SYS___AE_THREAD_SWAPMODE = 0x792 + SYS___GETNETBYADDR_A = 0x793 + SYS___GETNETBYNAME_A = 0x794 + SYS___GETNETENT_A = 0x795 + SYS___GETPROTOBYNAME_A = 0x796 + SYS___GETPROTOBYNUMBER_A = 0x797 + SYS___GETPROTOENT_A = 0x798 + SYS___GETSERVBYNAME_A = 0x799 + SYS_ACL_FIRST_ENTRY = 0x800 + SYS_ACL_GET_ENTRY = 0x801 + SYS_ACL_VALID = 0x802 + SYS_ACL_CREATE_ENTRY = 0x803 + SYS_ACL_DELETE_ENTRY = 0x804 + SYS_ACL_UPDATE_ENTRY = 0x805 + SYS_ACL_DELETE_FD = 0x806 + SYS_ACL_DELETE_FILE = 0x807 + SYS_ACL_GET_FD = 0x808 + SYS_ACL_GET_FILE = 0x809 + SYS___ERFL_B = 0x810 + SYS___ERFCL_B = 0x811 + SYS___LGAMMAL_B = 0x812 + SYS___SETHOOKEVENTS = 0x813 + SYS_IF_NAMETOINDEX = 0x814 + SYS_IF_INDEXTONAME = 0x815 + SYS_IF_NAMEINDEX = 0x816 + SYS_IF_FREENAMEINDEX = 0x817 + SYS_GETADDRINFO = 0x818 + SYS_GETNAMEINFO = 0x819 + SYS___DYNFREE_A = 0x820 + SYS___RES_QUERY_A = 0x821 + SYS___RES_SEARCH_A = 0x822 + SYS___RES_QUERYDOMAIN_A = 0x823 + SYS___RES_MKQUERY_A = 0x824 + SYS___RES_SEND_A = 0x825 + SYS___DN_EXPAND_A = 0x826 + SYS___DN_SKIPNAME_A = 0x827 + SYS___DN_COMP_A = 0x828 + SYS___DN_FIND_A = 0x829 + SYS___INET_NTOA_A = 0x830 + SYS___INET_NETWORK_A = 0x831 + SYS___ACCEPT_A = 0x832 + SYS___ACCEPT_AND_RECV_A = 0x833 + SYS___BIND_A = 0x834 + SYS___CONNECT_A = 0x835 + SYS___GETPEERNAME_A = 0x836 + SYS___GETSOCKNAME_A = 0x837 + SYS___RECVFROM_A = 0x838 + SYS___SENDTO_A = 0x839 + SYS___LCHATTR = 0x840 + SYS___WRITEDOWN = 0x841 + SYS_PTHREAD_MUTEX_INIT2 = 0x842 + SYS___ACOSHF_B = 0x843 + SYS___ACOSHL_B = 0x844 + SYS___ASINHF_B = 0x845 + SYS___ASINHL_B = 0x846 + SYS___ATANHF_B = 0x847 + SYS___ATANHL_B = 0x848 + SYS___CBRTF_B = 0x849 + SYS___EXP2F_B = 0x850 + SYS___EXP2L_B = 0x851 + SYS___EXPM1F_B = 0x852 + SYS___EXPM1L_B = 0x853 + SYS___FDIMF_B = 0x854 + SYS___FDIM_B = 0x855 + SYS___FDIML_B = 0x856 + SYS___HYPOTF_B = 0x857 + SYS___HYPOTL_B = 0x858 + SYS___LOG1PF_B = 0x859 + SYS___REMQUOF_B = 0x860 + SYS___REMQUO_B = 0x861 + SYS___REMQUOL_B = 0x862 + SYS___TGAMMAF_B = 0x863 + SYS___TGAMMA_B = 0x864 + SYS___TGAMMAL_B = 0x865 + SYS___TRUNCF_B = 0x866 + SYS___TRUNC_B = 0x867 + SYS___TRUNCL_B = 0x868 + SYS___LGAMMAF_B = 0x869 + SYS_ASINHF = 0x870 + SYS_ASINHL = 0x871 + SYS_ATANHF = 0x872 + SYS_ATANHL = 0x873 + SYS_CBRTF = 0x874 + SYS_CBRTL = 0x875 + SYS_COPYSIGNF = 0x876 + SYS_CPYSIGNF = 0x876 + SYS_COPYSIGNL = 0x877 + SYS_CPYSIGNL = 0x877 + SYS_COTANF = 0x878 + SYS___COTANF = 0x878 + SYS_COTAN = 0x879 + SYS___COTAN = 0x879 + SYS_FDIM = 0x881 + SYS_FDIML = 0x882 + SYS_HYPOTF = 0x883 + SYS_HYPOTL = 0x884 + SYS_LOG1PF = 0x885 + SYS_LOG1PL = 0x886 + SYS_LOG2F = 0x887 + SYS_LOG2 = 0x888 + SYS_LOG2L = 0x889 + SYS_TGAMMA = 0x890 + SYS_TGAMMAL = 0x891 + SYS_TRUNCF = 0x892 + SYS_TRUNC = 0x893 + SYS_TRUNCL = 0x894 + SYS_LGAMMAF = 0x895 + SYS_LGAMMAL = 0x896 + SYS_LROUNDF = 0x897 + SYS_LROUND = 0x898 + SYS_ERFF = 0x899 + SYS___COSHF_H = 0x900 + SYS___COSHL_H = 0x901 + SYS___COTAN_H = 0x902 + SYS___COTANF_H = 0x903 + SYS___COTANL_H = 0x904 + SYS___ERF_H = 0x905 + SYS___ERFF_H = 0x906 + SYS___ERFL_H = 0x907 + SYS___ERFC_H = 0x908 + SYS___ERFCF_H = 0x909 + SYS___FDIMF_H = 0x910 + SYS___FDIML_H = 0x911 + SYS___FMOD_H = 0x912 + 
SYS___FMODF_H = 0x913 + SYS___FMODL_H = 0x914 + SYS___GAMMA_H = 0x915 + SYS___HYPOT_H = 0x916 + SYS___ILOGB_H = 0x917 + SYS___LGAMMA_H = 0x918 + SYS___LGAMMAF_H = 0x919 + SYS___LOG2L_H = 0x920 + SYS___LOG1P_H = 0x921 + SYS___LOG10_H = 0x922 + SYS___LOG10F_H = 0x923 + SYS___LOG10L_H = 0x924 + SYS___LROUND_H = 0x925 + SYS___LROUNDF_H = 0x926 + SYS___NEXTAFTER_H = 0x927 + SYS___POW_H = 0x928 + SYS___POWF_H = 0x929 + SYS___SINL_H = 0x930 + SYS___SINH_H = 0x931 + SYS___SINHF_H = 0x932 + SYS___SINHL_H = 0x933 + SYS___SQRT_H = 0x934 + SYS___SQRTF_H = 0x935 + SYS___SQRTL_H = 0x936 + SYS___TAN_H = 0x937 + SYS___TANF_H = 0x938 + SYS___TANL_H = 0x939 + SYS___TRUNCF_H = 0x940 + SYS___TRUNCL_H = 0x941 + SYS___COSH_H = 0x942 + SYS___LE_DEBUG_SET_RESUME_MCH = 0x943 + SYS_VFSCANF = 0x944 + SYS_VSCANF = 0x946 + SYS_VSSCANF = 0x948 + SYS_IMAXABS = 0x950 + SYS_IMAXDIV = 0x951 + SYS_STRTOIMAX = 0x952 + SYS_STRTOUMAX = 0x953 + SYS_WCSTOIMAX = 0x954 + SYS_WCSTOUMAX = 0x955 + SYS_ATOLL = 0x956 + SYS_STRTOF = 0x957 + SYS_STRTOLD = 0x958 + SYS_WCSTOF = 0x959 + SYS_INET6_RTH_GETADDR = 0x960 + SYS_INET6_OPT_INIT = 0x961 + SYS_INET6_OPT_APPEND = 0x962 + SYS_INET6_OPT_FINISH = 0x963 + SYS_INET6_OPT_SET_VAL = 0x964 + SYS_INET6_OPT_NEXT = 0x965 + SYS_INET6_OPT_FIND = 0x966 + SYS_INET6_OPT_GET_VAL = 0x967 + SYS___POW_I = 0x987 + SYS___POW_I_B = 0x988 + SYS___POW_I_H = 0x989 + SYS___CABS_H = 0x990 + SYS_CABSF = 0x991 + SYS___CABSF_B = 0x992 + SYS___CABSF_H = 0x993 + SYS_CABSL = 0x994 + SYS___CABSL_B = 0x995 + SYS___CABSL_H = 0x996 + SYS_CACOS = 0x997 + SYS___CACOS_B = 0x998 + SYS___CACOS_H = 0x999 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go index 295859c503d..7a8161c1d1c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go @@ -1,6 +1,7 @@ // cgo -godefs types_aix.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build ppc && aix // +build ppc,aix package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go index a9ee0ffd44c..07ed733c51b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go @@ -1,6 +1,7 @@ // cgo -godefs types_aix.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build ppc64 && aix // +build ppc64,aix package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go index 725b4bee27d..54db4333555 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go @@ -1,6 +1,7 @@ // cgo -godefs types_darwin.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && darwin // +build 386,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index bb39542f7a9..eb73e52fb68 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -1,6 +1,7 @@ // cgo -godefs types_darwin.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build amd64 && darwin // +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go index f2a77bc4e28..8606d654e56 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go @@ -1,6 +1,7 @@ // cgo -godefs types_darwin.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && darwin // +build arm,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index ec5b5592721..dcb51f8404d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -1,6 +1,7 @@ // cgo -godefs types_darwin.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && darwin // +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index 85506a05d4b..1d049d7a122 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -1,6 +1,7 @@ // cgo -godefs types_dragonfly.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && dragonfly // +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 3e9dad33e33..c51bc88ffdf 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -1,6 +1,7 @@ // cgo -godefs types_freebsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && freebsd // +build 386,freebsd package unix @@ -250,6 +251,14 @@ type RawSockaddrAny struct { type _Socklen uint32 +type Xucred struct { + Version uint32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 + _ *byte +} + type Linger struct { Onoff int32 Linger int32 @@ -312,6 +321,7 @@ const ( SizeofSockaddrAny = 0x6c SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x36 + SizeofXucred = 0x50 SizeofLinger = 0x8 SizeofIovec = 0x8 SizeofIPMreq = 0x8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index e00e615544c..395b6918711 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -1,6 +1,7 @@ // cgo -godefs types_freebsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build amd64 && freebsd // +build amd64,freebsd package unix @@ -246,6 +247,14 @@ type RawSockaddrAny struct { type _Socklen uint32 +type Xucred struct { + Version uint32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 + _ *byte +} + type Linger struct { Onoff int32 Linger int32 @@ -308,6 +317,7 @@ const ( SizeofSockaddrAny = 0x6c SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x36 + SizeofXucred = 0x58 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 5da13c871b5..d3f9d2541b5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -1,6 +1,7 @@ // cgo -godefs -- -fsigned-char types_freebsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && freebsd // +build arm,freebsd package unix @@ -248,6 +249,14 @@ type RawSockaddrAny struct { type _Socklen uint32 +type Xucred struct { + Version uint32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 + _ *byte +} + type Linger struct { Onoff int32 Linger int32 @@ -310,6 +319,7 @@ const ( SizeofSockaddrAny = 0x6c SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x36 + SizeofXucred = 0x50 SizeofLinger = 0x8 SizeofIovec = 0x8 SizeofIPMreq = 0x8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 995ecf9d4e2..434d6e8e83c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -fsigned-char types_freebsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && freebsd // +build arm64,freebsd package unix @@ -246,6 +247,14 @@ type RawSockaddrAny struct { type _Socklen uint32 +type Xucred struct { + Version uint32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 + _ *byte +} + type Linger struct { Onoff int32 Linger int32 @@ -308,6 +317,7 @@ const ( SizeofSockaddrAny = 0x6c SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x36 + SizeofXucred = 0x58 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go new file mode 100644 index 00000000000..1137a5a1f4b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go @@ -0,0 +1,40 @@ +// cgo -godefs types_illumos.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build amd64 && illumos +// +build amd64,illumos + +package unix + +const ( + TUNNEWPPA = 0x540001 + TUNSETPPA = 0x540002 + + I_STR = 0x5308 + I_POP = 0x5303 + I_PUSH = 0x5302 + I_PLINK = 0x5316 + I_PUNLINK = 0x5317 + + IF_UNITSEL = -0x7ffb8cca +) + +type strbuf struct { + Maxlen int32 + Len int32 + Buf *int8 +} + +type strioctl struct { + Cmd int32 + Timout int32 + Len int32 + Dp *int8 +} + +type lifreq struct { + Name [32]int8 + Lifru1 [4]byte + Type uint32 + Lifru [336]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index ddd655769d5..3bfc6f7323b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1,5 +1,6 @@ // Code generated by mkmerge.go; DO NOT EDIT. 
+//go:build linux // +build linux package unix @@ -83,7 +84,7 @@ type FileCloneRange struct { Dest_offset uint64 } -type FileDedupeRange struct { +type RawFileDedupeRange struct { Src_offset uint64 Src_length uint64 Dest_count uint16 @@ -91,6 +92,21 @@ type FileDedupeRange struct { Reserved2 uint32 } +type RawFileDedupeRangeInfo struct { + Dest_fd int64 + Dest_offset uint64 + Bytes_deduped uint64 + Status int32 + Reserved uint32 +} + +const ( + SizeofRawFileDedupeRange = 0x18 + SizeofRawFileDedupeRangeInfo = 0x20 + FILE_DEDUPE_RANGE_SAME = 0x0 + FILE_DEDUPE_RANGE_DIFFERS = 0x1 +) + type FscryptPolicy struct { Version uint8 Contents_encryption_mode uint8 @@ -3681,3 +3697,41 @@ const ( ETHTOOL_A_TUNNEL_INFO_UDP_PORTS = 0x2 ETHTOOL_A_TUNNEL_INFO_MAX = 0x2 ) + +type EthtoolDrvinfo struct { + Cmd uint32 + Driver [32]byte + Version [32]byte + Fw_version [32]byte + Bus_info [32]byte + Erom_version [32]byte + Reserved2 [12]byte + N_priv_flags uint32 + N_stats uint32 + Testinfo_len uint32 + Eedump_len uint32 + Regdump_len uint32 +} + +type ( + HIDRawReportDescriptor struct { + Size uint32 + Value [4096]uint8 + } + HIDRawDevInfo struct { + Bustype uint32 + Vendor int16 + Product int16 + } +) + +const ( + CLOSE_RANGE_UNSHARE = 0x2 + CLOSE_RANGE_CLOEXEC = 0x4 +) + +const ( + NLMSGERR_ATTR_MSG = 0x1 + NLMSGERR_ATTR_OFFS = 0x2 + NLMSGERR_ATTR_COOKIE = 0x3 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 088bd77e3be..4d4d283de5b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && linux // +build 386,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 078d958ec95..8a2eed5ec4e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && linux // +build amd64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 2d39122f410..94b34add643 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && linux // +build arm,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 304cbd04536..2143de4d599 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build arm64 && linux // +build arm64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 7d9d57006aa..a40216eee60 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips && linux // +build mips,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index a1eb2577b08..e834b069fd5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips64 && linux // +build mips64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 2e5ce3b6a69..e31083b0489 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips64le && linux // +build mips64le,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index bbaa1200b6a..42811f7fb55 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mipsle && linux // +build mipsle,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 0e6e8a77483..2a3afbaef9f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build ppc64 && linux // +build ppc64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 7382f385faf..c0de30a658a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build ppc64le && linux // +build ppc64le,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 28d55221665..74faf2e91fd 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build riscv64 && linux // +build riscv64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index a91a7a44bd3..9a8f0c2c6a3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build s390x && linux // +build s390x,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index f824b2358dc..72cdda75bde 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build sparc64 && linux // +build sparc64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index 3f11f88e3c6..b10e73abf95 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -1,6 +1,7 @@ // cgo -godefs types_netbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && netbsd // +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index 0bed83af57b..28ed6d55ae3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -1,6 +1,7 @@ // cgo -godefs types_netbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && netbsd // +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index e4e3bf736d8..4ba196ebe56 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -1,6 +1,7 @@ // cgo -godefs types_netbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && netbsd // +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go index efac861bb7f..dd642bd9c87 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go @@ -1,6 +1,7 @@ // cgo -godefs types_netbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
+//go:build arm64 && netbsd // +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 80fa295f1df..1fdb0e5fa5f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -1,6 +1,7 @@ // cgo -godefs types_openbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build 386 && openbsd // +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index 560dd6d08af..e2fc93c7c06 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -1,6 +1,7 @@ // cgo -godefs types_openbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && openbsd // +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index 0c1700fa435..8d34b5a2fc0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -1,6 +1,7 @@ // cgo -godefs -- -fsigned-char types_openbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm && openbsd // +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index 5b3e46633e9..ea8f1a0d9ba 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -fsigned-char types_openbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build arm64 && openbsd // +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go index 62bff167097..ec6e8bc3f13 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -1,6 +1,7 @@ // cgo -godefs -- -fsigned-char types_openbsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build mips64 && openbsd // +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index ca512aff7f8..85effef9c19 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -1,6 +1,7 @@ // cgo -godefs types_solaris.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. +//go:build amd64 && solaris // +build amd64,solaris package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go new file mode 100644 index 00000000000..8bffde78e58 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -0,0 +1,402 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x +// +build zos,s390x + +// Hand edited based on ztypes_linux_s390x.go +// TODO: auto-generate. 
+ +package unix + +const ( + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 + PathMax = 0x1000 +) + +const ( + SizeofSockaddrAny = 128 + SizeofCmsghdr = 12 + SizeofIPMreq = 8 + SizeofIPv6Mreq = 20 + SizeofICMPv6Filter = 32 + SizeofIPv6MTUInfo = 32 + SizeofLinger = 8 + SizeofSockaddrInet4 = 16 + SizeofSockaddrInet6 = 28 + SizeofTCPInfo = 0x68 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type timeval_zos struct { //correct (with padding and all) + Sec int64 + _ [4]byte // pad + Usec int32 +} + +type Tms struct { //clock_t is 4-byte unsigned int in zos + Utime uint32 + Stime uint32 + Cutime uint32 + Cstime uint32 +} + +type Time_t int64 + +type Utimbuf struct { + Actime int64 + Modtime int64 +} + +type Utsname struct { + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte +} + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [108]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]uint8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + _ [112]uint8 // pad +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Iov *Iovec + Control *byte + Flags int32 + Namelen int32 + Iovlen int32 + Controllen int32 +} + +type Cmsghdr struct { + Len int32 + Level int32 + Type int32 +} + +type Inet4Pktinfo struct { + Addr [4]byte /* in_addr */ + Ifindex uint32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Data [8]uint32 +} + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 +} + +type _Gid_t uint32 + +type rusage_zos struct { + Utime timeval_zos + Stime timeval_zos +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +// { int, short, short } in poll.h +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +type Stat_t struct { //Linux Definition + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint32 + Uid uint32 + Gid uint32 + _ int32 + 
Rdev uint64 + Size int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Blksize int64 + Blocks int64 + _ [3]int64 +} + +type Stat_LE_t struct { + _ [4]byte // eye catcher + Length uint16 + Version uint16 + Mode int32 + Ino uint32 + Dev uint32 + Nlink int32 + Uid int32 + Gid int32 + Size int64 + Atim31 [4]byte + Mtim31 [4]byte + Ctim31 [4]byte + Rdev uint32 + Auditoraudit uint32 + Useraudit uint32 + Blksize int32 + Creatim31 [4]byte + AuditID [16]byte + _ [4]byte // rsrvd1 + File_tag struct { + Ccsid uint16 + Txtflag uint16 // aggregating Txflag:1 deferred:1 rsvflags:14 + } + CharsetID [8]byte + Blocks int64 + Genvalue uint32 + Reftim31 [4]byte + Fid [8]byte + Filefmt byte + Fspflag2 byte + _ [2]byte // rsrvd2 + Ctimemsec int32 + Seclabel [8]byte + _ [4]byte // rsrvd3 + _ [4]byte // rsrvd4 + Atim Time_t + Mtim Time_t + Ctim Time_t + Creatim Time_t + Reftim Time_t + _ [24]byte // rsrvd5 +} + +type Statvfs_t struct { + ID [4]byte + Len int32 + Bsize uint64 + Blocks uint64 + Usedspace uint64 + Bavail uint64 + Flag uint64 + Maxfilesize int64 + _ [16]byte + Frsize uint64 + Bfree uint64 + Files uint32 + Ffree uint32 + Favail uint32 + Namemax31 uint32 + Invarsec uint32 + _ [4]byte + Fsid uint64 + Namemax uint64 +} + +type Statfs_t struct { + Type uint32 + Bsize uint64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint32 + Ffree uint32 + Fsid uint64 + Namelen uint64 + Frsize uint64 + Flags uint64 +} + +type Dirent struct { + Reclen uint16 + Namlen uint16 + Ino uint32 + Extra uintptr + Name [256]byte +} + +// This struct is packed on z/OS so it can't be used directly. +type Flock_t struct { + Type int16 + Whence int16 + Start int64 + Len int64 + Pid int32 +} + +type Termios struct { + Cflag uint32 + Iflag uint32 + Lflag uint32 + Oflag uint32 + Cc [11]uint8 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +type W_Mnth struct { + Hid [4]byte + Size int32 + Cur1 int32 //32bit pointer + Cur2 int32 //^ + Devno uint32 + _ [4]byte +} + +type W_Mntent struct { + Fstype uint32 + Mode uint32 + Dev uint32 + Parentdev uint32 + Rootino uint32 + Status byte + Ddname [9]byte + Fstname [9]byte + Fsname [45]byte + Pathlen uint32 + Mountpoint [1024]byte + Jobname [8]byte + PID int32 + Parmoffset int32 + Parmlen int16 + Owner [8]byte + Quiesceowner [8]byte + _ [38]byte +} diff --git a/vendor/golang.org/x/sys/windows/exec_windows.go b/vendor/golang.org/x/sys/windows/exec_windows.go index 3606c3a8b36..9eb1fb633a4 100644 --- a/vendor/golang.org/x/sys/windows/exec_windows.go +++ b/vendor/golang.org/x/sys/windows/exec_windows.go @@ -6,6 +6,11 @@ package windows +import ( + errorspkg "errors" + "unsafe" +) + // EscapeArg rewrites command line argument s as prescribed // in http://msdn.microsoft.com/en-us/library/ms880421. // This function returns "" (2 double quotes) if s is empty. @@ -95,3 +100,33 @@ func FullPath(name string) (path string, err error) { } } } + +// NewProcThreadAttributeList allocates a new ProcThreadAttributeList, with the requested maximum number of attributes. +func NewProcThreadAttributeList(maxAttrCount uint32) (*ProcThreadAttributeList, error) { + var size uintptr + err := initializeProcThreadAttributeList(nil, maxAttrCount, 0, &size) + if err != ERROR_INSUFFICIENT_BUFFER { + if err == nil { + return nil, errorspkg.New("unable to query buffer size from InitializeProcThreadAttributeList") + } + return nil, err + } + const psize = unsafe.Sizeof(uintptr(0)) + // size is guaranteed to be ≥1 by InitializeProcThreadAttributeList. 
+ al := (*ProcThreadAttributeList)(unsafe.Pointer(&make([]unsafe.Pointer, (size+psize-1)/psize)[0])) + err = initializeProcThreadAttributeList(al, maxAttrCount, 0, &size) + if err != nil { + return nil, err + } + return al, err +} + +// Update modifies the ProcThreadAttributeList using UpdateProcThreadAttribute. +func (al *ProcThreadAttributeList) Update(attribute uintptr, flags uint32, value unsafe.Pointer, size uintptr, prevValue unsafe.Pointer, returnedSize *uintptr) error { + return updateProcThreadAttribute(al, flags, attribute, value, size, prevValue, returnedSize) +} + +// Delete frees ProcThreadAttributeList's resources. +func (al *ProcThreadAttributeList) Delete() { + deleteProcThreadAttributeList(al) +} diff --git a/vendor/golang.org/x/sys/windows/mkerrors.bash b/vendor/golang.org/x/sys/windows/mkerrors.bash index 2163843a11d..58e0188fb71 100644 --- a/vendor/golang.org/x/sys/windows/mkerrors.bash +++ b/vendor/golang.org/x/sys/windows/mkerrors.bash @@ -9,6 +9,8 @@ shopt -s nullglob winerror="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/shared/winerror.h | sort -Vr | head -n 1)" [[ -n $winerror ]] || { echo "Unable to find winerror.h" >&2; exit 1; } +ntstatus="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/shared/ntstatus.h | sort -Vr | head -n 1)" +[[ -n $ntstatus ]] || { echo "Unable to find ntstatus.h" >&2; exit 1; } declare -A errors @@ -59,5 +61,10 @@ declare -A errors echo "$key $vtype = $value" done < "$winerror" + while read -r line; do + [[ $line =~ ^#define\ (STATUS_[^\s]+)\ +\(\(NTSTATUS\)((0x)?[0-9a-fA-F]+)L?\) ]] || continue + echo "${BASH_REMATCH[1]} NTStatus = ${BASH_REMATCH[2]}" + done < "$ntstatus" + echo ")" } | gofmt > "zerrors_windows.go" diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 69eb462c59a..111c10d3a7f 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -908,6 +908,19 @@ type SECURITY_DESCRIPTOR struct { dacl *ACL } +type SECURITY_QUALITY_OF_SERVICE struct { + Length uint32 + ImpersonationLevel uint32 + ContextTrackingMode byte + EffectiveOnly byte +} + +// Constants for the ContextTrackingMode field of SECURITY_QUALITY_OF_SERVICE. 
+const ( + SECURITY_STATIC_TRACKING = 0 + SECURITY_DYNAMIC_TRACKING = 1 +) + type SecurityAttributes struct { Length uint32 SecurityDescriptor *SECURITY_DESCRIPTOR @@ -1321,7 +1334,11 @@ func (absoluteSD *SECURITY_DESCRIPTOR) ToSelfRelative() (selfRelativeSD *SECURIT } func (selfRelativeSD *SECURITY_DESCRIPTOR) copySelfRelativeSecurityDescriptor() *SECURITY_DESCRIPTOR { - sdLen := (int)(selfRelativeSD.Length()) + sdLen := int(selfRelativeSD.Length()) + const min = int(unsafe.Sizeof(SECURITY_DESCRIPTOR{})) + if sdLen < min { + sdLen = min + } var src []byte h := (*unsafeheader.Slice)(unsafe.Pointer(&src)) @@ -1329,7 +1346,15 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) copySelfRelativeSecurityDescriptor() h.Len = sdLen h.Cap = sdLen - dst := make([]byte, sdLen) + const psize = int(unsafe.Sizeof(uintptr(0))) + + var dst []byte + h = (*unsafeheader.Slice)(unsafe.Pointer(&dst)) + alloc := make([]uintptr, (sdLen+psize-1)/psize) + h.Data = (*unsafeheader.Slice)(unsafe.Pointer(&alloc)).Data + h.Len = sdLen + h.Cap = sdLen + copy(dst, src) return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&dst[0])) } diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 0197df87276..bb6aaf89e47 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -8,6 +8,8 @@ package windows import ( errorspkg "errors" + "fmt" + "runtime" "sync" "syscall" "time" @@ -65,9 +67,8 @@ const ( LOCKFILE_FAIL_IMMEDIATELY = 0x00000001 LOCKFILE_EXCLUSIVE_LOCK = 0x00000002 - // Return values of SleepEx and other APC functions - STATUS_USER_APC = 0x000000C0 - WAIT_IO_COMPLETION = STATUS_USER_APC + // Return value of SleepEx and other APC functions + WAIT_IO_COMPLETION = 0x000000C0 ) // StringToUTF16 is deprecated. Use UTF16FromString instead. @@ -180,6 +181,11 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys IsWow64Process(handle Handle, isWow64 *bool) (err error) = IsWow64Process //sys IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint16) (err error) = IsWow64Process2? 
//sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW +//sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW +//sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) +//sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) +//sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW +//sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState //sys ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) //sys WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) //sys GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) @@ -208,12 +214,15 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetSystemTimeAsFileTime(time *Filetime) //sys GetSystemTimePreciseAsFileTime(time *Filetime) //sys GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) [failretval==0xffffffff] -//sys CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uint32, threadcnt uint32) (handle Handle, err error) -//sys GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uint32, overlapped **Overlapped, timeout uint32) (err error) -//sys PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uint32, overlapped *Overlapped) (err error) +//sys CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, threadcnt uint32) (handle Handle, err error) +//sys GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overlapped **Overlapped, timeout uint32) (err error) +//sys PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overlapped *Overlapped) (err error) //sys CancelIo(s Handle) (err error) //sys CancelIoEx(s Handle, o *Overlapped) (err error) //sys CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = CreateProcessW +//sys initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrcount uint32, flags uint32, size *uintptr) (err error) = InitializeProcThreadAttributeList +//sys deleteProcThreadAttributeList(attrlist *ProcThreadAttributeList) = DeleteProcThreadAttributeList +//sys updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) = UpdateProcThreadAttribute //sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) //sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) [failretval<=32] = shell32.ShellExecuteW //sys GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) = user32.GetWindowThreadProcessId @@ 
-248,13 +257,14 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetCommandLine() (cmd *uint16) = kernel32.GetCommandLineW //sys CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW //sys LocalFree(hmem Handle) (handle Handle, err error) [failretval!=0] +//sys LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) //sys SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) //sys FlushFileBuffers(handle Handle) (err error) //sys GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) = kernel32.GetFullPathNameW //sys GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) = kernel32.GetLongPathNameW //sys GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) = kernel32.GetShortPathNameW //sys GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) = kernel32.GetFinalPathNameByHandleW -//sys CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) = kernel32.CreateFileMappingW +//sys CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateFileMappingW //sys MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) //sys UnmapViewOfFile(addr uintptr) (err error) //sys FlushViewOfFile(addr uintptr, length uintptr) (err error) @@ -283,6 +293,9 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) = crypt32.CertVerifyCertificateChainPolicy //sys CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) = crypt32.CertGetNameStringW //sys CertFindExtension(objId *byte, countExtensions uint32, extensions *CertExtension) (ret *CertExtension) = crypt32.CertFindExtension +//sys CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevCertContext *CertContext) (cert *CertContext, err error) [failretval==nil] = crypt32.CertFindCertificateInStore +//sys CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevChainContext *CertChainContext) (certchain *CertChainContext, err error) [failretval==nil] = crypt32.CertFindChainInStore +//sys CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, parameters unsafe.Pointer, cryptProvOrNCryptKey *Handle, keySpec *uint32, callerFreeProvOrNCryptKey *bool) (err error) = crypt32.CryptAcquireCertificatePrivateKey //sys CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) = crypt32.CryptQueryObject //sys CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) = 
crypt32.CryptDecodeObject //sys CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) = crypt32.CryptProtectData @@ -312,14 +325,14 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) [failretval&0xff==0] = CreateSymbolicLinkW //sys CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) [failretval&0xff==0] = CreateHardLinkW //sys GetCurrentThreadId() (id uint32) -//sys CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) = kernel32.CreateEventW -//sys CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) = kernel32.CreateEventExW +//sys CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateEventW +//sys CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateEventExW //sys OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenEventW //sys SetEvent(event Handle) (err error) = kernel32.SetEvent //sys ResetEvent(event Handle) (err error) = kernel32.ResetEvent //sys PulseEvent(event Handle) (err error) = kernel32.PulseEvent -//sys CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) = kernel32.CreateMutexW -//sys CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) = kernel32.CreateMutexExW +//sys CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateMutexW +//sys CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateMutexExW //sys OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenMutexW //sys ReleaseMutex(mutex Handle) (err error) = kernel32.ReleaseMutex //sys SleepEx(milliseconds uint32, alertable bool) (ret uint32) = kernel32.SleepEx @@ -334,10 +347,13 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) //sys GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) //sys GetProcessId(process Handle) (id uint32, err error) +//sys QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size *uint32) (err error) = kernel32.QueryFullProcessImageNameW //sys OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) //sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost //sys GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) //sys SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, 
dwMaximumWorkingSetSize uintptr, flags uint32) (err error) +//sys GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) // Volume Management Functions //sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW @@ -367,16 +383,36 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) = ole32.StringFromGUID2 //sys coCreateGuid(pguid *GUID) (ret error) = ole32.CoCreateGuid //sys CoTaskMemFree(address unsafe.Pointer) = ole32.CoTaskMemFree -//sys rtlGetVersion(info *OsVersionInfoEx) (ret error) = ntdll.RtlGetVersion -//sys rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) = ntdll.RtlGetNtVersionNumbers +//sys CoInitializeEx(reserved uintptr, coInit uint32) (ret error) = ole32.CoInitializeEx +//sys CoUninitialize() = ole32.CoUninitialize +//sys CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable **uintptr) (ret error) = ole32.CoGetObject //sys getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetProcessPreferredUILanguages //sys getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetThreadPreferredUILanguages //sys getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetUserPreferredUILanguages //sys getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetSystemPreferredUILanguages +//sys findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, err error) = kernel32.FindResourceW +//sys SizeofResource(module Handle, resInfo Handle) (size uint32, err error) = kernel32.SizeofResource +//sys LoadResource(module Handle, resInfo Handle) (resData Handle, err error) = kernel32.LoadResource +//sys LockResource(resData Handle) (addr uintptr, err error) = kernel32.LockResource // Process Status API (PSAPI) //sys EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses +// NT Native APIs +//sys rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) = ntdll.RtlNtStatusToDosErrorNoTeb +//sys rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) = ntdll.RtlGetVersion +//sys rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) = ntdll.RtlGetNtVersionNumbers +//sys RtlGetCurrentPeb() (peb *PEB) = ntdll.RtlGetCurrentPeb +//sys RtlInitUnicodeString(destinationString *NTUnicodeString, sourceString *uint16) = ntdll.RtlInitUnicodeString +//sys RtlInitString(destinationString *NTString, sourceString *byte) = ntdll.RtlInitString +//sys NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) = ntdll.NtCreateFile +//sys NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) = ntdll.NtCreateNamedPipeFile +//sys RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, 
ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) = ntdll.RtlDosPathNameToNtPathName_U_WithStatus +//sys RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) = ntdll.RtlDosPathNameToRelativeNtPathName_U_WithStatus +//sys RtlDefaultNpAcl(acl **ACL) (ntstatus error) = ntdll.RtlDefaultNpAcl +//sys NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) = ntdll.NtQueryInformationProcess +//sys NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) = ntdll.NtSetInformationProcess + // syscall interface implementation for other packages // GetCurrentProcess returns the handle for the current process. @@ -770,6 +806,7 @@ const socket_error = uintptr(^uint32(0)) //sys WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASend //sys WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecvFrom //sys WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASendTo +//sys WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.WSASocketW //sys GetHostByName(name string) (h *Hostent, err error) [failretval==nil] = ws2_32.gethostbyname //sys GetServByName(name string, proto string) (s *Servent, err error) [failretval==nil] = ws2_32.getservbyname //sys Ntohs(netshort uint16) (u uint16) = ws2_32.ntohs @@ -783,6 +820,7 @@ const socket_error = uintptr(^uint32(0)) //sys GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) = iphlpapi.GetAdaptersInfo //sys SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) = kernel32.SetFileCompletionNotificationModes //sys WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) [failretval==-1] = ws2_32.WSAEnumProtocolsW +//sys WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult //sys GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) = iphlpapi.GetAdaptersAddresses //sys GetACP() (acp uint32) = kernel32.GetACP //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar @@ -1505,3 +1543,129 @@ func getUILanguages(flags uint32, f func(flags uint32, numLanguages *uint32, buf func SetConsoleCursorPosition(console Handle, position Coord) error { return setConsoleCursorPosition(console, *((*uint32)(unsafe.Pointer(&position)))) } + +func (s NTStatus) Errno() syscall.Errno { + return rtlNtStatusToDosErrorNoTeb(s) +} + +func langID(pri, sub uint16) uint32 { return uint32(sub)<<10 | uint32(pri) } + +func (s NTStatus) Error() string { + b := make([]uint16, 300) + n, err := 
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_FROM_HMODULE|FORMAT_MESSAGE_ARGUMENT_ARRAY, modntdll.Handle(), uint32(s), langID(LANG_ENGLISH, SUBLANG_ENGLISH_US), b, nil) + if err != nil { + return fmt.Sprintf("NTSTATUS 0x%08x", uint32(s)) + } + // trim terminating \r and \n + for ; n > 0 && (b[n-1] == '\n' || b[n-1] == '\r'); n-- { + } + return string(utf16.Decode(b[:n])) +} + +// NewNTUnicodeString returns a new NTUnicodeString structure for use with native +// NT APIs that work over the NTUnicodeString type. Note that most Windows APIs +// do not use NTUnicodeString, and instead UTF16PtrFromString should be used for +// the more common *uint16 string type. +func NewNTUnicodeString(s string) (*NTUnicodeString, error) { + var u NTUnicodeString + s16, err := UTF16PtrFromString(s) + if err != nil { + return nil, err + } + RtlInitUnicodeString(&u, s16) + return &u, nil +} + +// Slice returns a uint16 slice that aliases the data in the NTUnicodeString. +func (s *NTUnicodeString) Slice() []uint16 { + var slice []uint16 + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice)) + hdr.Data = unsafe.Pointer(s.Buffer) + hdr.Len = int(s.Length) + hdr.Cap = int(s.MaximumLength) + return slice +} + +func (s *NTUnicodeString) String() string { + return UTF16ToString(s.Slice()) +} + +// NewNTString returns a new NTString structure for use with native +// NT APIs that work over the NTString type. Note that most Windows APIs +// do not use NTString, and instead UTF16PtrFromString should be used for +// the more common *uint16 string type. +func NewNTString(s string) (*NTString, error) { + var nts NTString + s8, err := BytePtrFromString(s) + if err != nil { + return nil, err + } + RtlInitString(&nts, s8) + return &nts, nil +} + +// Slice returns a byte slice that aliases the data in the NTString. +func (s *NTString) Slice() []byte { + var slice []byte + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice)) + hdr.Data = unsafe.Pointer(s.Buffer) + hdr.Len = int(s.Length) + hdr.Cap = int(s.MaximumLength) + return slice +} + +func (s *NTString) String() string { + return ByteSliceToString(s.Slice()) +} + +// FindResource resolves a resource of the given name and resource type. 
+func FindResource(module Handle, name, resType ResourceIDOrString) (Handle, error) { + var namePtr, resTypePtr uintptr + var name16, resType16 *uint16 + var err error + resolvePtr := func(i interface{}, keep **uint16) (uintptr, error) { + switch v := i.(type) { + case string: + *keep, err = UTF16PtrFromString(v) + if err != nil { + return 0, err + } + return uintptr(unsafe.Pointer(*keep)), nil + case ResourceID: + return uintptr(v), nil + } + return 0, errorspkg.New("parameter must be a ResourceID or a string") + } + namePtr, err = resolvePtr(name, &name16) + if err != nil { + return 0, err + } + resTypePtr, err = resolvePtr(resType, &resType16) + if err != nil { + return 0, err + } + resInfo, err := findResource(module, namePtr, resTypePtr) + runtime.KeepAlive(name16) + runtime.KeepAlive(resType16) + return resInfo, err +} + +func LoadResourceData(module, resInfo Handle) (data []byte, err error) { + size, err := SizeofResource(module, resInfo) + if err != nil { + return + } + resData, err := LoadResource(module, resInfo) + if err != nil { + return + } + ptr, err := LockResource(resData) + if err != nil { + return + } + h := (*unsafeheader.Slice)(unsafe.Pointer(&data)) + h.Data = unsafe.Pointer(ptr) + h.Len = int(size) + h.Cap = int(size) + return +} diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index fd4260762a8..23fe18ecef2 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -10,6 +10,10 @@ import ( "unsafe" ) +// NTStatus corresponds with NTSTATUS, error values returned by ntdll.dll and +// other native functions. +type NTStatus uint32 + const ( // Invented values to support what package os expects. O_RDONLY = 0x00000 @@ -215,6 +219,18 @@ const ( INHERIT_PARENT_AFFINITY = 0x00010000 ) +const ( + // attributes for ProcThreadAttributeList + PROC_THREAD_ATTRIBUTE_PARENT_PROCESS = 0x00020000 + PROC_THREAD_ATTRIBUTE_HANDLE_LIST = 0x00020002 + PROC_THREAD_ATTRIBUTE_GROUP_AFFINITY = 0x00030003 + PROC_THREAD_ATTRIBUTE_PREFERRED_NODE = 0x00020004 + PROC_THREAD_ATTRIBUTE_IDEAL_PROCESSOR = 0x00030005 + PROC_THREAD_ATTRIBUTE_MITIGATION_POLICY = 0x00020007 + PROC_THREAD_ATTRIBUTE_UMS_THREAD = 0x00030006 + PROC_THREAD_ATTRIBUTE_PROTECTION_LEVEL = 0x0002000b +) + const ( // flags for CreateToolhelp32Snapshot TH32CS_SNAPHEAPLIST = 0x01 @@ -287,6 +303,23 @@ const ( PKCS12_NO_PERSIST_KEY = 0x00008000 PKCS12_INCLUDE_EXTENDED_PROPERTIES = 0x00000010 + /* Flags for CryptAcquireCertificatePrivateKey */ + CRYPT_ACQUIRE_CACHE_FLAG = 0x00000001 + CRYPT_ACQUIRE_USE_PROV_INFO_FLAG = 0x00000002 + CRYPT_ACQUIRE_COMPARE_KEY_FLAG = 0x00000004 + CRYPT_ACQUIRE_NO_HEALING = 0x00000008 + CRYPT_ACQUIRE_SILENT_FLAG = 0x00000040 + CRYPT_ACQUIRE_WINDOW_HANDLE_FLAG = 0x00000080 + CRYPT_ACQUIRE_NCRYPT_KEY_FLAGS_MASK = 0x00070000 + CRYPT_ACQUIRE_ALLOW_NCRYPT_KEY_FLAG = 0x00010000 + CRYPT_ACQUIRE_PREFER_NCRYPT_KEY_FLAG = 0x00020000 + CRYPT_ACQUIRE_ONLY_NCRYPT_KEY_FLAG = 0x00040000 + + /* pdwKeySpec for CryptAcquireCertificatePrivateKey */ + AT_KEYEXCHANGE = 1 + AT_SIGNATURE = 2 + CERT_NCRYPT_KEY_SPEC = 0xFFFFFFFF + /* Default usage match type is AND with value zero */ USAGE_MATCH_TYPE_AND = 0 USAGE_MATCH_TYPE_OR = 1 @@ -412,6 +445,89 @@ const ( CERT_TRUST_IS_CA_TRUSTED = 0x00004000 CERT_TRUST_IS_COMPLEX_CHAIN = 0x00010000 + /* Certificate Information Flags */ + CERT_INFO_VERSION_FLAG = 1 + CERT_INFO_SERIAL_NUMBER_FLAG = 2 + CERT_INFO_SIGNATURE_ALGORITHM_FLAG = 3 + CERT_INFO_ISSUER_FLAG = 4 + 
CERT_INFO_NOT_BEFORE_FLAG = 5 + CERT_INFO_NOT_AFTER_FLAG = 6 + CERT_INFO_SUBJECT_FLAG = 7 + CERT_INFO_SUBJECT_PUBLIC_KEY_INFO_FLAG = 8 + CERT_INFO_ISSUER_UNIQUE_ID_FLAG = 9 + CERT_INFO_SUBJECT_UNIQUE_ID_FLAG = 10 + CERT_INFO_EXTENSION_FLAG = 11 + + /* dwFindType for CertFindCertificateInStore */ + CERT_COMPARE_MASK = 0xFFFF + CERT_COMPARE_SHIFT = 16 + CERT_COMPARE_ANY = 0 + CERT_COMPARE_SHA1_HASH = 1 + CERT_COMPARE_NAME = 2 + CERT_COMPARE_ATTR = 3 + CERT_COMPARE_MD5_HASH = 4 + CERT_COMPARE_PROPERTY = 5 + CERT_COMPARE_PUBLIC_KEY = 6 + CERT_COMPARE_HASH = CERT_COMPARE_SHA1_HASH + CERT_COMPARE_NAME_STR_A = 7 + CERT_COMPARE_NAME_STR_W = 8 + CERT_COMPARE_KEY_SPEC = 9 + CERT_COMPARE_ENHKEY_USAGE = 10 + CERT_COMPARE_CTL_USAGE = CERT_COMPARE_ENHKEY_USAGE + CERT_COMPARE_SUBJECT_CERT = 11 + CERT_COMPARE_ISSUER_OF = 12 + CERT_COMPARE_EXISTING = 13 + CERT_COMPARE_SIGNATURE_HASH = 14 + CERT_COMPARE_KEY_IDENTIFIER = 15 + CERT_COMPARE_CERT_ID = 16 + CERT_COMPARE_CROSS_CERT_DIST_POINTS = 17 + CERT_COMPARE_PUBKEY_MD5_HASH = 18 + CERT_COMPARE_SUBJECT_INFO_ACCESS = 19 + CERT_COMPARE_HASH_STR = 20 + CERT_COMPARE_HAS_PRIVATE_KEY = 21 + CERT_FIND_ANY = (CERT_COMPARE_ANY << CERT_COMPARE_SHIFT) + CERT_FIND_SHA1_HASH = (CERT_COMPARE_SHA1_HASH << CERT_COMPARE_SHIFT) + CERT_FIND_MD5_HASH = (CERT_COMPARE_MD5_HASH << CERT_COMPARE_SHIFT) + CERT_FIND_SIGNATURE_HASH = (CERT_COMPARE_SIGNATURE_HASH << CERT_COMPARE_SHIFT) + CERT_FIND_KEY_IDENTIFIER = (CERT_COMPARE_KEY_IDENTIFIER << CERT_COMPARE_SHIFT) + CERT_FIND_HASH = CERT_FIND_SHA1_HASH + CERT_FIND_PROPERTY = (CERT_COMPARE_PROPERTY << CERT_COMPARE_SHIFT) + CERT_FIND_PUBLIC_KEY = (CERT_COMPARE_PUBLIC_KEY << CERT_COMPARE_SHIFT) + CERT_FIND_SUBJECT_NAME = (CERT_COMPARE_NAME<= 0 { + s = s[:p] + } + return s } return "" } @@ -329,13 +337,13 @@ func (t Tag) SetTypeForKey(key, value string) (Tag, error) { // Remove the setting if value is "". if value == "" { - start, end, _ := t.findTypeForKey(key) - if start != end { - // Remove key tag and leading '-'. - start -= 4 - + start, sep, end, _ := t.findTypeForKey(key) + if start != sep { // Remove a possible empty extension. - if (end == len(t.str) || t.str[end+2] == '-') && t.str[start-2] == '-' { + switch { + case t.str[start-2] != '-': // has previous elements. + case end == len(t.str), // end of string + end+2 < len(t.str) && t.str[end+2] == '-': // end of extension start -= 2 } if start == int(t.pVariant) && end == len(t.str) { @@ -381,14 +389,14 @@ func (t Tag) SetTypeForKey(key, value string) (Tag, error) { t.str = string(buf[:uStart+len(b)]) } else { s := t.str - start, end, hasExt := t.findTypeForKey(key) - if start == end { + start, sep, end, hasExt := t.findTypeForKey(key) + if start == sep { if hasExt { b = b[2:] } - t.str = fmt.Sprintf("%s-%s%s", s[:start], b, s[end:]) + t.str = fmt.Sprintf("%s-%s%s", s[:sep], b, s[end:]) } else { - t.str = fmt.Sprintf("%s%s%s", s[:start], value, s[end:]) + t.str = fmt.Sprintf("%s-%s%s", s[:start+3], value, s[end:]) } } return t, nil @@ -399,10 +407,10 @@ func (t Tag) SetTypeForKey(key, value string) (Tag, error) { // wasn't found. The hasExt return value reports whether an -u extension was present. // Note: the extensions are typically very small and are likely to contain // only one key-type pair. 
-func (t Tag) findTypeForKey(key string) (start, end int, hasExt bool) { +func (t Tag) findTypeForKey(key string) (start, sep, end int, hasExt bool) { p := int(t.pExt) if len(key) != 2 || p == len(t.str) || p == 0 { - return p, p, false + return p, p, p, false } s := t.str @@ -410,10 +418,10 @@ func (t Tag) findTypeForKey(key string) (start, end int, hasExt bool) { for p++; s[p] != 'u'; p++ { if s[p] > 'u' { p-- - return p, p, false + return p, p, p, false } if p = nextExtension(s, p); p == len(s) { - return len(s), len(s), false + return len(s), len(s), len(s), false } } // Proceed to the hyphen following the extension name. @@ -424,40 +432,28 @@ func (t Tag) findTypeForKey(key string) (start, end int, hasExt bool) { // Iterate over keys until we get the end of a section. for { - // p points to the hyphen preceding the current token. - if p3 := p + 3; s[p3] == '-' { - // Found a key. - // Check whether we just processed the key that was requested. - if curKey == key { - return start, p, true - } - // Set to the next key and continue scanning type tokens. - curKey = s[p+1 : p3] - if curKey > key { - return p, p, true - } - // Start of the type token sequence. - start = p + 4 - // A type is at least 3 characters long. - p += 7 // 4 + 3 - } else { - // Attribute or type, which is at least 3 characters long. - p += 4 - } - // p points past the third character of a type or attribute. - max := p + 5 // maximum length of token plus hyphen. - if len(s) < max { - max = len(s) + end = p + for p++; p < len(s) && s[p] != '-'; p++ { } - for ; p < max && s[p] != '-'; p++ { + n := p - end - 1 + if n <= 2 && curKey == key { + if sep < end { + sep++ + } + return start, sep, end, true } - // Bail if we have exhausted all tokens or if the next token starts - // a new extension. - if p == len(s) || s[p+2] == '-' { - if curKey == key { - return start, p, true + switch n { + case 0, // invalid string + 1: // next extension + return end, end, end, true + case 2: + // next key + curKey = s[end+1 : p] + if curKey > key { + return end, end, end, true } - return p, p, true + start = end + sep = p } } } diff --git a/vendor/golang.org/x/text/internal/language/parse.go b/vendor/golang.org/x/text/internal/language/parse.go index a2fdad89db4..c696fd0bd86 100644 --- a/vendor/golang.org/x/text/internal/language/parse.go +++ b/vendor/golang.org/x/text/internal/language/parse.go @@ -138,7 +138,7 @@ func (s *scanner) resizeRange(oldStart, oldEnd, newSize int) { b = make([]byte, n) copy(b, s.b[:oldStart]) } else { - b = s.b[:n:n] + b = s.b[:n] } copy(b[end:], s.b[oldEnd:]) s.b = b @@ -483,7 +483,7 @@ func parseExtensions(scan *scanner) int { func parseExtension(scan *scanner) int { start, end := scan.start, scan.end switch scan.token[0] { - case 'u': + case 'u': // https://www.ietf.org/rfc/rfc6067.txt attrStart := end scan.scan() for last := []byte{}; len(scan.token) > 2; scan.scan() { @@ -503,27 +503,29 @@ func parseExtension(scan *scanner) int { last = scan.token end = scan.end } + // Scan key-type sequences. A key is of length 2 and may be followed + // by 0 or more "type" subtags from 3 to the maximum of 8 letters. 
var last, key []byte for attrEnd := end; len(scan.token) == 2; last = key { key = scan.token - keyEnd := scan.end - end = scan.acceptMinSize(3) + end = scan.end + for scan.scan(); end < scan.end && len(scan.token) > 2; scan.scan() { + end = scan.end + } // TODO: check key value validity - if keyEnd == end || bytes.Compare(key, last) != 1 { + if bytes.Compare(key, last) != 1 || scan.err != nil { // We have an invalid key or the keys are not sorted. // Start scanning keys from scratch and reorder. p := attrEnd + 1 scan.next = p keys := [][]byte{} for scan.scan(); len(scan.token) == 2; { - keyStart, keyEnd := scan.start, scan.end - end = scan.acceptMinSize(3) - if keyEnd != end { - keys = append(keys, scan.b[keyStart:end]) - } else { - scan.setError(ErrSyntax) - end = keyStart + keyStart := scan.start + end = scan.end + for scan.scan(); end < scan.end && len(scan.token) > 2; scan.scan() { + end = scan.end } + keys = append(keys, scan.b[keyStart:end]) } sort.Stable(bytesSort{keys, 2}) if n := len(keys); n > 0 { @@ -547,7 +549,7 @@ func parseExtension(scan *scanner) int { break } } - case 't': + case 't': // https://www.ietf.org/rfc/rfc6497.txt scan.scan() if n := len(scan.token); n >= 2 && n <= 3 && isAlpha(scan.token[1]) { _, end = parseTag(scan) diff --git a/vendor/golang.org/x/text/language/go1_1.go b/vendor/golang.org/x/text/language/go1_1.go index 380f4c09f7f..c7435583b5f 100644 --- a/vendor/golang.org/x/text/language/go1_1.go +++ b/vendor/golang.org/x/text/language/go1_1.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.2 // +build !go1.2 package language diff --git a/vendor/golang.org/x/text/language/go1_2.go b/vendor/golang.org/x/text/language/go1_2.go index 38268c57a37..77aaaa299eb 100644 --- a/vendor/golang.org/x/text/language/go1_2.go +++ b/vendor/golang.org/x/text/language/go1_2.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.2 // +build go1.2 package language diff --git a/vendor/golang.org/x/text/language/language.go b/vendor/golang.org/x/text/language/language.go index abfa17f66db..289b3a36d52 100644 --- a/vendor/golang.org/x/text/language/language.go +++ b/vendor/golang.org/x/text/language/language.go @@ -412,6 +412,10 @@ func (t Tag) Extensions() []Extension { // are of the allowed values defined for the Unicode locale extension ('u') in // https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. // TypeForKey will traverse the inheritance chain to get the correct value. +// +// If there are multiple types associated with a key, only the first will be +// returned. If there is no type associated with a key, it returns the empty +// string. 
func (t Tag) TypeForKey(key string) string { if !compact.Tag(t).MayHaveExtensions() { if key != "rg" && key != "va" { diff --git a/vendor/golang.org/x/text/language/tables.go b/vendor/golang.org/x/text/language/tables.go index 87e58a02a08..96b57f610ad 100644 --- a/vendor/golang.org/x/text/language/tables.go +++ b/vendor/golang.org/x/text/language/tables.go @@ -47,7 +47,7 @@ const ( _Zzzz = 251 ) -var regionToGroups = []uint8{ // 357 elements +var regionToGroups = []uint8{ // 358 elements // Entry 0 - 3F 0x00, 0x00, 0x00, 0x04, 0x04, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x00, @@ -98,8 +98,8 @@ var regionToGroups = []uint8{ // 357 elements 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, -} // Size: 381 bytes + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +} // Size: 382 bytes var paradigmLocales = [][3]uint16{ // 3 elements 0: [3]uint16{0x139, 0x0, 0x7b}, @@ -295,4 +295,4 @@ var matchRegion = []regionIntelligibility{ // 15 elements 14: {lang: 0x529, script: 0x3c, group: 0x80, distance: 0x5}, } // Size: 114 bytes -// Total table size 1471 bytes (1KiB); checksum: 4CB1CD46 +// Total table size 1472 bytes (1KiB); checksum: F86C669 diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go b/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go index e4c62289f90..8a7392c4a16 100644 --- a/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go +++ b/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.10 // +build go1.10 package bidirule diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go b/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go index 02b9e1e9d4c..bb0a920018c 100644 --- a/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go +++ b/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.10 // +build !go1.10 package bidirule diff --git a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go index d8c94e1bd1a..42fa8d72cec 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.10 && !go1.13 // +build go1.10,!go1.13 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go index 16b11db5388..56a0e1ea216 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.13 && !go1.14 // +build go1.13,!go1.14 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go index 647f2d4279e..baacf32b43c 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
+//go:build go1.14 && !go1.16 // +build go1.14,!go1.16 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go index c937d0976fe..f248effae17 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.16 // +build go1.16 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go index 0ca0193ebe2..f517fdb202a 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build !go1.10 // +build !go1.10 package bidi diff --git a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go index 26fbd55a124..f5a0788277f 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.10 && !go1.13 // +build go1.10,!go1.13 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go index 2c58f09baa4..cb7239c4377 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.13 && !go1.14 // +build go1.13,!go1.14 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go index 7e1ae096e5c..11b27330017 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.14 && !go1.16 // +build go1.14,!go1.16 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go index 9ea1b421407..96a130d30e9 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.16 // +build go1.16 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go index 94290692913..0175eae50aa 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build !go1.10 // +build !go1.10 package norm diff --git a/vendor/golang.org/x/text/width/tables10.0.0.go b/vendor/golang.org/x/text/width/tables10.0.0.go index decb8e48093..186b1d4efac 100644 --- a/vendor/golang.org/x/text/width/tables10.0.0.go +++ b/vendor/golang.org/x/text/width/tables10.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
+//go:build go1.10 && !go1.13 // +build go1.10,!go1.13 package width diff --git a/vendor/golang.org/x/text/width/tables11.0.0.go b/vendor/golang.org/x/text/width/tables11.0.0.go index 3c75e428fd0..990f7622f17 100644 --- a/vendor/golang.org/x/text/width/tables11.0.0.go +++ b/vendor/golang.org/x/text/width/tables11.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.13 && !go1.14 // +build go1.13,!go1.14 package width diff --git a/vendor/golang.org/x/text/width/tables12.0.0.go b/vendor/golang.org/x/text/width/tables12.0.0.go index 543942b9e78..85296297e38 100644 --- a/vendor/golang.org/x/text/width/tables12.0.0.go +++ b/vendor/golang.org/x/text/width/tables12.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.14 && !go1.16 // +build go1.14,!go1.16 package width diff --git a/vendor/golang.org/x/text/width/tables13.0.0.go b/vendor/golang.org/x/text/width/tables13.0.0.go index 804264ca67d..bac3f1aee34 100644 --- a/vendor/golang.org/x/text/width/tables13.0.0.go +++ b/vendor/golang.org/x/text/width/tables13.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.16 // +build go1.16 package width diff --git a/vendor/golang.org/x/text/width/tables9.0.0.go b/vendor/golang.org/x/text/width/tables9.0.0.go index 7069e26345b..b3db84f6f9b 100644 --- a/vendor/golang.org/x/text/width/tables9.0.0.go +++ b/vendor/golang.org/x/text/width/tables9.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build !go1.10 // +build !go1.10 package width diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json index 65615e67b3a..5d1cdf2b751 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json @@ -3,7 +3,7 @@ "oauth2": { "scopes": { "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" + "description": "See, edit, configure, and delete your Google Cloud Platform data" }, "https://www.googleapis.com/auth/cloud-platform.read-only": { "description": "View your data across Google Cloud Platform services" @@ -332,7 +332,7 @@ ] }, "get": { - "description": "Retrieve a Lien by `name`. Callers of this method will require permission on the `parent` resource. For example, a Lien with a `parent` of `projects/1234` requires permission requires permission `resourcemanager.projects.get` or `resourcemanager.projects.updateLiens`.", + "description": "Retrieve a Lien by `name`. Callers of this method will require permission on the `parent` resource. For example, a Lien with a `parent` of `projects/1234` requires permission `resourcemanager.projects.get`", "flatPath": "v1/liens/{liensId}", "httpMethod": "GET", "id": "cloudresourcemanager.liens.get", @@ -1171,7 +1171,7 @@ } } }, - "revision": "20210114", + "revision": "20210328", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "Ancestor": { @@ -1425,7 +1425,7 @@ "type": "string" }, "gettable": { - "description": "True if the project can be retrieved using GetProject. 
No other operations on the project are guaranteed to work until the project creation is complete.", + "description": "True if the project can be retrieved using `GetProject`. No other operations on the project are guaranteed to work until the project creation is complete.", "type": "boolean" }, "ready": { @@ -1448,19 +1448,19 @@ "type": "object" }, "DeleteFolderMetadata": { - "description": "A status object which is used as the `metadata` field for the Operation returned by DeleteFolder.", + "description": "A status object which is used as the `metadata` field for the `Operation` returned by `DeleteFolder`.", "id": "DeleteFolderMetadata", "properties": {}, "type": "object" }, "DeleteOrganizationMetadata": { - "description": "A status object which is used as the `metadata` field for the Operation returned by DeleteOrganization.", + "description": "A status object which is used as the `metadata` field for the operation returned by DeleteOrganization.", "id": "DeleteOrganizationMetadata", "properties": {}, "type": "object" }, "DeleteProjectMetadata": { - "description": "A status object which is used as the `metadata` field for the Operation returned by DeleteProject.", + "description": "A status object which is used as the `metadata` field for the Operation returned by `DeleteProject`.", "id": "DeleteProjectMetadata", "properties": {}, "type": "object" @@ -1837,7 +1837,7 @@ "type": "object" }, "MoveFolderMetadata": { - "description": "Metadata pertaining to the Folder move process.", + "description": "Metadata pertaining to the folder move process.", "id": "MoveFolderMetadata", "properties": { "destinationParent": { @@ -2228,7 +2228,7 @@ "type": "object" }, "UndeleteFolderMetadata": { - "description": "A status object which is used as the `metadata` field for the Operation returned by UndeleteFolder.", + "description": "A status object which is used as the `metadata` field for the `Operation` returned by `UndeleteFolder`.", "id": "UndeleteFolderMetadata", "properties": {}, "type": "object" @@ -2240,7 +2240,7 @@ "type": "object" }, "UndeleteProjectMetadata": { - "description": "A status object which is used as the `metadata` field for the Operation returned by UndeleteProject.", + "description": "A status object which is used as the `metadata` field for the Operation returned by `UndeleteProject`.", "id": "UndeleteProjectMetadata", "properties": {}, "type": "object" diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go index 0ecb8127981..ecffc71b5f0 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go @@ -83,7 +83,7 @@ const mtlsBasePath = "https://cloudresourcemanager.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( - // View and manage your data across Google Cloud Platform services + // See, edit, configure, and delete your Google Cloud Platform data CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" // View your data across Google Cloud Platform services @@ -698,7 +698,7 @@ type CreateProjectMetadata struct { // CreateTime: Creation time of the project creation workflow. CreateTime string `json:"createTime,omitempty"` - // Gettable: True if the project can be retrieved using GetProject. No + // Gettable: True if the project can be retrieved using `GetProject`. 
No // other operations on the project are guaranteed to work until the // project creation is complete. Gettable bool `json:"gettable,omitempty"` @@ -740,17 +740,17 @@ type CreateTagValueMetadata struct { } // DeleteFolderMetadata: A status object which is used as the `metadata` -// field for the Operation returned by DeleteFolder. +// field for the `Operation` returned by `DeleteFolder`. type DeleteFolderMetadata struct { } // DeleteOrganizationMetadata: A status object which is used as the -// `metadata` field for the Operation returned by DeleteOrganization. +// `metadata` field for the operation returned by DeleteOrganization. type DeleteOrganizationMetadata struct { } // DeleteProjectMetadata: A status object which is used as the -// `metadata` field for the Operation returned by DeleteProject. +// `metadata` field for the Operation returned by `DeleteProject`. type DeleteProjectMetadata struct { } @@ -1560,7 +1560,7 @@ func (s *ListProjectsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// MoveFolderMetadata: Metadata pertaining to the Folder move process. +// MoveFolderMetadata: Metadata pertaining to the folder move process. type MoveFolderMetadata struct { // DestinationParent: The resource name of the folder or organization to // move the folder to. @@ -2367,7 +2367,7 @@ func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { } // UndeleteFolderMetadata: A status object which is used as the -// `metadata` field for the Operation returned by UndeleteFolder. +// `metadata` field for the `Operation` returned by `UndeleteFolder`. type UndeleteFolderMetadata struct { } @@ -2377,7 +2377,7 @@ type UndeleteOrganizationMetadata struct { } // UndeleteProjectMetadata: A status object which is used as the -// `metadata` field for the Operation returned by UndeleteProject. +// `metadata` field for the Operation returned by `UndeleteProject`. type UndeleteProjectMetadata struct { } @@ -2418,6 +2418,8 @@ type FoldersClearOrgPolicyCall struct { } // ClearOrgPolicy: Clears a `Policy` from a resource. +// +// - resource: Name of the resource for the `Policy` to clear. func (r *FoldersService) ClearOrgPolicy(resource string, clearorgpolicyrequest *ClearOrgPolicyRequest) *FoldersClearOrgPolicyCall { c := &FoldersClearOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -2452,7 +2454,7 @@ func (c *FoldersClearOrgPolicyCall) Header() http.Header { func (c *FoldersClearOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2562,6 +2564,9 @@ type FoldersGetEffectiveOrgPolicyCall struct { // The returned `Policy` will not have an `etag`set because it is a // computed `Policy` across multiple resources. Subtrees of Resource // Manager resource hierarchy with 'under:' prefix will not be expanded. +// +// - resource: The name of the resource to start computing the effective +// `Policy`. 
func (r *FoldersService) GetEffectiveOrgPolicy(resource string, geteffectiveorgpolicyrequest *GetEffectiveOrgPolicyRequest) *FoldersGetEffectiveOrgPolicyCall { c := &FoldersGetEffectiveOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -2596,7 +2601,7 @@ func (c *FoldersGetEffectiveOrgPolicyCall) Header() http.Header { func (c *FoldersGetEffectiveOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2707,6 +2712,8 @@ type FoldersGetOrgPolicyCall struct { // `POLICY_TYPE_NOT_SET` for the `policy_type oneof`. The `etag` value // can be used with `SetOrgPolicy()` to create or update a `Policy` // during read-modify-write. +// +// - resource: Name of the resource the `Policy` is set on. func (r *FoldersService) GetOrgPolicy(resource string, getorgpolicyrequest *GetOrgPolicyRequest) *FoldersGetOrgPolicyCall { c := &FoldersGetOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -2741,7 +2748,7 @@ func (c *FoldersGetOrgPolicyCall) Header() http.Header { func (c *FoldersGetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2849,6 +2856,8 @@ type FoldersListAvailableOrgPolicyConstraintsCall struct { // ListAvailableOrgPolicyConstraints: Lists `Constraints` that could be // applied on the specified resource. +// +// - resource: Name of the resource to list `Constraints` for. func (r *FoldersService) ListAvailableOrgPolicyConstraints(resource string, listavailableorgpolicyconstraintsrequest *ListAvailableOrgPolicyConstraintsRequest) *FoldersListAvailableOrgPolicyConstraintsCall { c := &FoldersListAvailableOrgPolicyConstraintsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -2883,7 +2892,7 @@ func (c *FoldersListAvailableOrgPolicyConstraintsCall) Header() http.Header { func (c *FoldersListAvailableOrgPolicyConstraintsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3014,6 +3023,8 @@ type FoldersListOrgPoliciesCall struct { // ListOrgPolicies: Lists all the `Policies` set for a particular // resource. +// +// - resource: Name of the resource to list Policies for. 
func (r *FoldersService) ListOrgPolicies(resource string, listorgpoliciesrequest *ListOrgPoliciesRequest) *FoldersListOrgPoliciesCall { c := &FoldersListOrgPoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -3048,7 +3059,7 @@ func (c *FoldersListOrgPoliciesCall) Header() http.Header { func (c *FoldersListOrgPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3179,6 +3190,8 @@ type FoldersSetOrgPolicyCall struct { // a new `Policy` for that `Constraint` on the resource if one does not // exist. Not supplying an `etag` on the request `Policy` results in an // unconditional write of the `Policy`. +// +// - resource: Resource name of the resource to attach the `Policy`. func (r *FoldersService) SetOrgPolicy(resource string, setorgpolicyrequest *SetOrgPolicyRequest) *FoldersSetOrgPolicyCall { c := &FoldersSetOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -3213,7 +3226,7 @@ func (c *FoldersSetOrgPolicyCall) Header() http.Header { func (c *FoldersSetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3355,7 +3368,7 @@ func (c *LiensCreateCall) Header() http.Header { func (c *LiensCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3451,6 +3464,8 @@ type LiensDeleteCall struct { // permission on the `parent` resource. For example, a Lien with a // `parent` of `projects/1234` requires permission // `resourcemanager.projects.updateLiens`. +// +// - name: The name/identifier of the Lien to delete. func (r *LiensService) Delete(nameid string) *LiensDeleteCall { c := &LiensDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.nameid = nameid @@ -3484,7 +3499,7 @@ func (c *LiensDeleteCall) Header() http.Header { func (c *LiensDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3584,9 +3599,10 @@ type LiensGetCall struct { // Get: Retrieve a Lien by `name`. Callers of this method will require // permission on the `parent` resource. For example, a Lien with a -// `parent` of `projects/1234` requires permission requires permission -// `resourcemanager.projects.get` or -// `resourcemanager.projects.updateLiens`. +// `parent` of `projects/1234` requires permission +// `resourcemanager.projects.get` +// +// - name: The name/identifier of the Lien. 
func (r *LiensService) Get(nameid string) *LiensGetCall { c := &LiensGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.nameid = nameid @@ -3630,7 +3646,7 @@ func (c *LiensGetCall) Header() http.Header { func (c *LiensGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3692,7 +3708,7 @@ func (c *LiensGetCall) Do(opts ...googleapi.CallOption) (*Lien, error) { } return ret, nil // { - // "description": "Retrieve a Lien by `name`. Callers of this method will require permission on the `parent` resource. For example, a Lien with a `parent` of `projects/1234` requires permission requires permission `resourcemanager.projects.get` or `resourcemanager.projects.updateLiens`.", + // "description": "Retrieve a Lien by `name`. Callers of this method will require permission on the `parent` resource. For example, a Lien with a `parent` of `projects/1234` requires permission `resourcemanager.projects.get`", // "flatPath": "v1/liens/{liensId}", // "httpMethod": "GET", // "id": "cloudresourcemanager.liens.get", @@ -3801,7 +3817,7 @@ func (c *LiensListCall) Header() http.Header { func (c *LiensListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3930,6 +3946,8 @@ type OperationsGetCall struct { // Get: Gets the latest state of a long-running operation. Clients can // use this method to poll the operation result at intervals as // recommended by the API service. +// +// - name: The name of the operation resource. func (r *OperationsService) Get(name string) *OperationsGetCall { c := &OperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -3973,7 +3991,7 @@ func (c *OperationsGetCall) Header() http.Header { func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4075,6 +4093,8 @@ type OrganizationsClearOrgPolicyCall struct { } // ClearOrgPolicy: Clears a `Policy` from a resource. +// +// - resource: Name of the resource for the `Policy` to clear. func (r *OrganizationsService) ClearOrgPolicy(resource string, clearorgpolicyrequest *ClearOrgPolicyRequest) *OrganizationsClearOrgPolicyCall { c := &OrganizationsClearOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -4109,7 +4129,7 @@ func (c *OrganizationsClearOrgPolicyCall) Header() http.Header { func (c *OrganizationsClearOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4216,6 +4236,11 @@ type OrganizationsGetCall struct { // Get: Fetches an Organization resource identified by the specified // resource name. 
+// +// - name: The resource name of the Organization to fetch. This is the +// organization's relative path in the API, formatted as +// "organizations/[organizationId]". For example, +// "organizations/1234". func (r *OrganizationsService) Get(name string) *OrganizationsGetCall { c := &OrganizationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4259,7 +4284,7 @@ func (c *OrganizationsGetCall) Header() http.Header { func (c *OrganizationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4365,6 +4390,9 @@ type OrganizationsGetEffectiveOrgPolicyCall struct { // The returned `Policy` will not have an `etag`set because it is a // computed `Policy` across multiple resources. Subtrees of Resource // Manager resource hierarchy with 'under:' prefix will not be expanded. +// +// - resource: The name of the resource to start computing the effective +// `Policy`. func (r *OrganizationsService) GetEffectiveOrgPolicy(resource string, geteffectiveorgpolicyrequest *GetEffectiveOrgPolicyRequest) *OrganizationsGetEffectiveOrgPolicyCall { c := &OrganizationsGetEffectiveOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -4399,7 +4427,7 @@ func (c *OrganizationsGetEffectiveOrgPolicyCall) Header() http.Header { func (c *OrganizationsGetEffectiveOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4511,6 +4539,10 @@ type OrganizationsGetIamPolicyCall struct { // "organizations/123". Authorization requires the Google IAM permission // `resourcemanager.organizations.getIamPolicy` on the specified // organization +// +// - resource: REQUIRED: The resource for which the policy is being +// requested. See the operation documentation for the appropriate +// value for this field. func (r *OrganizationsService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *OrganizationsGetIamPolicyCall { c := &OrganizationsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -4545,7 +4577,7 @@ func (c *OrganizationsGetIamPolicyCall) Header() http.Header { func (c *OrganizationsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4656,6 +4688,8 @@ type OrganizationsGetOrgPolicyCall struct { // `POLICY_TYPE_NOT_SET` for the `policy_type oneof`. The `etag` value // can be used with `SetOrgPolicy()` to create or update a `Policy` // during read-modify-write. +// +// - resource: Name of the resource the `Policy` is set on. 
func (r *OrganizationsService) GetOrgPolicy(resource string, getorgpolicyrequest *GetOrgPolicyRequest) *OrganizationsGetOrgPolicyCall { c := &OrganizationsGetOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -4690,7 +4724,7 @@ func (c *OrganizationsGetOrgPolicyCall) Header() http.Header { func (c *OrganizationsGetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4798,6 +4832,8 @@ type OrganizationsListAvailableOrgPolicyConstraintsCall struct { // ListAvailableOrgPolicyConstraints: Lists `Constraints` that could be // applied on the specified resource. +// +// - resource: Name of the resource to list `Constraints` for. func (r *OrganizationsService) ListAvailableOrgPolicyConstraints(resource string, listavailableorgpolicyconstraintsrequest *ListAvailableOrgPolicyConstraintsRequest) *OrganizationsListAvailableOrgPolicyConstraintsCall { c := &OrganizationsListAvailableOrgPolicyConstraintsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -4832,7 +4868,7 @@ func (c *OrganizationsListAvailableOrgPolicyConstraintsCall) Header() http.Heade func (c *OrganizationsListAvailableOrgPolicyConstraintsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4963,6 +4999,8 @@ type OrganizationsListOrgPoliciesCall struct { // ListOrgPolicies: Lists all the `Policies` set for a particular // resource. +// +// - resource: Name of the resource to list Policies for. func (r *OrganizationsService) ListOrgPolicies(resource string, listorgpoliciesrequest *ListOrgPoliciesRequest) *OrganizationsListOrgPoliciesCall { c := &OrganizationsListOrgPoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -4997,7 +5035,7 @@ func (c *OrganizationsListOrgPoliciesCall) Header() http.Header { func (c *OrganizationsListOrgPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5161,7 +5199,7 @@ func (c *OrganizationsSearchCall) Header() http.Header { func (c *OrganizationsSearchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5281,6 +5319,10 @@ type OrganizationsSetIamPolicyCall struct { // Authorization requires the Google IAM permission // `resourcemanager.organizations.setIamPolicy` on the specified // organization +// +// - resource: REQUIRED: The resource for which the policy is being +// specified. See the operation documentation for the appropriate +// value for this field. 
func (r *OrganizationsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *OrganizationsSetIamPolicyCall { c := &OrganizationsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -5315,7 +5357,7 @@ func (c *OrganizationsSetIamPolicyCall) Header() http.Header { func (c *OrganizationsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5424,6 +5466,8 @@ type OrganizationsSetOrgPolicyCall struct { // a new `Policy` for that `Constraint` on the resource if one does not // exist. Not supplying an `etag` on the request `Policy` results in an // unconditional write of the `Policy`. +// +// - resource: Resource name of the resource to attach the `Policy`. func (r *OrganizationsService) SetOrgPolicy(resource string, setorgpolicyrequest *SetOrgPolicyRequest) *OrganizationsSetOrgPolicyCall { c := &OrganizationsSetOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -5458,7 +5502,7 @@ func (c *OrganizationsSetOrgPolicyCall) Header() http.Header { func (c *OrganizationsSetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5567,6 +5611,10 @@ type OrganizationsTestIamPermissionsCall struct { // specified Organization. The `resource` field should be the // organization's resource name, e.g. "organizations/123". There are no // permissions required for making this API call. +// +// - resource: REQUIRED: The resource for which the policy detail is +// being requested. See the operation documentation for the +// appropriate value for this field. func (r *OrganizationsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *OrganizationsTestIamPermissionsCall { c := &OrganizationsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -5601,7 +5649,7 @@ func (c *OrganizationsTestIamPermissionsCall) Header() http.Header { func (c *OrganizationsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5708,6 +5756,8 @@ type ProjectsClearOrgPolicyCall struct { } // ClearOrgPolicy: Clears a `Policy` from a resource. +// +// - resource: Name of the resource for the `Policy` to clear. 
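For context on the Organizations IAM and OrgPolicy hunks above: the regenerated doc comments describe an etag-based read-modify-write flow (read a policy, mutate it, write it back). A minimal sketch of that flow against the vendored cloudresourcemanager/v1 client follows; the organization ID, role, and member are illustrative values only, and it assumes Application Default Credentials are available.

    package main

    import (
        "context"
        "log"

        cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1"
    )

    func main() {
        ctx := context.Background()
        // NewService picks up Application Default Credentials.
        svc, err := cloudresourcemanager.NewService(ctx)
        if err != nil {
            log.Fatal(err)
        }
        const org = "organizations/123" // illustrative resource name
        policy, err := svc.Organizations.GetIamPolicy(org, &cloudresourcemanager.GetIamPolicyRequest{}).Context(ctx).Do()
        if err != nil {
            log.Fatal(err)
        }
        policy.Bindings = append(policy.Bindings, &cloudresourcemanager.Binding{
            Role:    "roles/resourcemanager.organizationViewer", // illustrative role
            Members: []string{"user:alice@example.com"},         // illustrative member
        })
        // The policy carries the etag read above, so a concurrent writer makes
        // SetIamPolicy fail rather than silently overwriting its changes.
        if _, err := svc.Organizations.SetIamPolicy(org, &cloudresourcemanager.SetIamPolicyRequest{Policy: policy}).Context(ctx).Do(); err != nil {
            log.Fatal(err)
        }
    }
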
func (r *ProjectsService) ClearOrgPolicy(resource string, clearorgpolicyrequest *ClearOrgPolicyRequest) *ProjectsClearOrgPolicyCall { c := &ProjectsClearOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -5742,7 +5792,7 @@ func (c *ProjectsClearOrgPolicyCall) Header() http.Header { func (c *ProjectsClearOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5891,7 +5941,7 @@ func (c *ProjectsCreateCall) Header() http.Header { func (c *ProjectsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5994,6 +6044,8 @@ type ProjectsDeleteCall struct { // deletion completes, the Project is not retrievable by the GetProject // and ListProjects methods. The caller must have delete permissions for // this Project. +// +// - projectId: The Project ID (for example, `foo-bar-123`). func (r *ProjectsService) Delete(projectId string) *ProjectsDeleteCall { c := &ProjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -6027,7 +6079,7 @@ func (c *ProjectsDeleteCall) Header() http.Header { func (c *ProjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6126,6 +6178,8 @@ type ProjectsGetCall struct { // Get: Retrieves the Project identified by the specified `project_id` // (for example, `my-project-123`). The caller must have read // permissions for this Project. +// +// - projectId: The Project ID (for example, `my-project-123`). func (r *ProjectsService) Get(projectId string) *ProjectsGetCall { c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -6169,7 +6223,7 @@ func (c *ProjectsGetCall) Header() http.Header { func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6273,6 +6327,8 @@ type ProjectsGetAncestryCall struct { // the Project identified by the specified `project_id` (for example, // `my-project-123`). The caller must have read permissions for this // Project. +// +// - projectId: The Project ID (for example, `my-project-123`). 
func (r *ProjectsService) GetAncestry(projectId string, getancestryrequest *GetAncestryRequest) *ProjectsGetAncestryCall { c := &ProjectsGetAncestryCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -6307,7 +6363,7 @@ func (c *ProjectsGetAncestryCall) Header() http.Header { func (c *ProjectsGetAncestryCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6417,6 +6473,9 @@ type ProjectsGetEffectiveOrgPolicyCall struct { // The returned `Policy` will not have an `etag`set because it is a // computed `Policy` across multiple resources. Subtrees of Resource // Manager resource hierarchy with 'under:' prefix will not be expanded. +// +// - resource: The name of the resource to start computing the effective +// `Policy`. func (r *ProjectsService) GetEffectiveOrgPolicy(resource string, geteffectiveorgpolicyrequest *GetEffectiveOrgPolicyRequest) *ProjectsGetEffectiveOrgPolicyCall { c := &ProjectsGetEffectiveOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -6451,7 +6510,7 @@ func (c *ProjectsGetEffectiveOrgPolicyCall) Header() http.Header { func (c *ProjectsGetEffectiveOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6564,6 +6623,10 @@ type ProjectsGetIamPolicyCall struct { // additional information about `resource` (e.g. my-project-id) // structure and identification, see Resource Names // (https://cloud.google.com/apis/design/resource_names). +// +// - resource: REQUIRED: The resource for which the policy is being +// requested. See the operation documentation for the appropriate +// value for this field. func (r *ProjectsService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ProjectsGetIamPolicyCall { c := &ProjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -6598,7 +6661,7 @@ func (c *ProjectsGetIamPolicyCall) Header() http.Header { func (c *ProjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6708,6 +6771,8 @@ type ProjectsGetOrgPolicyCall struct { // `POLICY_TYPE_NOT_SET` for the `policy_type oneof`. The `etag` value // can be used with `SetOrgPolicy()` to create or update a `Policy` // during read-modify-write. +// +// - resource: Name of the resource the `Policy` is set on. 
func (r *ProjectsService) GetOrgPolicy(resource string, getorgpolicyrequest *GetOrgPolicyRequest) *ProjectsGetOrgPolicyCall { c := &ProjectsGetOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -6742,7 +6807,7 @@ func (c *ProjectsGetOrgPolicyCall) Header() http.Header { func (c *ProjectsGetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6950,7 +7015,7 @@ func (c *ProjectsListCall) Header() http.Header { func (c *ProjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7078,6 +7143,8 @@ type ProjectsListAvailableOrgPolicyConstraintsCall struct { // ListAvailableOrgPolicyConstraints: Lists `Constraints` that could be // applied on the specified resource. +// +// - resource: Name of the resource to list `Constraints` for. func (r *ProjectsService) ListAvailableOrgPolicyConstraints(resource string, listavailableorgpolicyconstraintsrequest *ListAvailableOrgPolicyConstraintsRequest) *ProjectsListAvailableOrgPolicyConstraintsCall { c := &ProjectsListAvailableOrgPolicyConstraintsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -7112,7 +7179,7 @@ func (c *ProjectsListAvailableOrgPolicyConstraintsCall) Header() http.Header { func (c *ProjectsListAvailableOrgPolicyConstraintsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7243,6 +7310,8 @@ type ProjectsListOrgPoliciesCall struct { // ListOrgPolicies: Lists all the `Policies` set for a particular // resource. +// +// - resource: Name of the resource to list Policies for. func (r *ProjectsService) ListOrgPolicies(resource string, listorgpoliciesrequest *ListOrgPoliciesRequest) *ProjectsListOrgPoliciesCall { c := &ProjectsListOrgPoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -7277,7 +7346,7 @@ func (c *ProjectsListOrgPoliciesCall) Header() http.Header { func (c *ProjectsListOrgPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7441,6 +7510,10 @@ type ProjectsSetIamPolicyCall struct { // lack of a ToS-accepting owner is rectified. Authorization requires // the Google IAM permission `resourcemanager.projects.setIamPolicy` on // the project +// +// - resource: REQUIRED: The resource for which the policy is being +// specified. See the operation documentation for the appropriate +// value for this field. 
func (r *ProjectsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsSetIamPolicyCall { c := &ProjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -7475,7 +7548,7 @@ func (c *ProjectsSetIamPolicyCall) Header() http.Header { func (c *ProjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7583,6 +7656,8 @@ type ProjectsSetOrgPolicyCall struct { // a new `Policy` for that `Constraint` on the resource if one does not // exist. Not supplying an `etag` on the request `Policy` results in an // unconditional write of the `Policy`. +// +// - resource: Resource name of the resource to attach the `Policy`. func (r *ProjectsService) SetOrgPolicy(resource string, setorgpolicyrequest *SetOrgPolicyRequest) *ProjectsSetOrgPolicyCall { c := &ProjectsSetOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -7617,7 +7692,7 @@ func (c *ProjectsSetOrgPolicyCall) Header() http.Header { func (c *ProjectsSetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7727,6 +7802,10 @@ type ProjectsTestIamPermissionsCall struct { // my-project-id) structure and identification, see Resource Names // (https://cloud.google.com/apis/design/resource_names). There are no // permissions required for making this API call. +// +// - resource: REQUIRED: The resource for which the policy detail is +// being requested. See the operation documentation for the +// appropriate value for this field. func (r *ProjectsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsTestIamPermissionsCall { c := &ProjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -7761,7 +7840,7 @@ func (c *ProjectsTestIamPermissionsCall) Header() http.Header { func (c *ProjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7871,6 +7950,8 @@ type ProjectsUndeleteCall struct { // method for a Project that has a lifecycle state of DELETE_REQUESTED. // After deletion starts, the Project cannot be restored. The caller // must have undelete permissions for this Project. +// +// - projectId: The project ID (for example, `foo-bar-123`). 
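The new "- projectId: ..." / "- resource: ..." bullets added to the Projects method comments simply document the positional arguments of the generated calls. A small read-only sketch using those calls is below; the project ID is illustrative, and response field names follow the generated cloudresourcemanager/v1 types as vendored here.

    package main

    import (
        "context"
        "fmt"
        "log"

        cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1"
    )

    func main() {
        ctx := context.Background()
        svc, err := cloudresourcemanager.NewService(ctx)
        if err != nil {
            log.Fatal(err)
        }
        // "- projectId: The Project ID (for example, `my-project-123`)." maps to
        // the first positional argument of Projects.Get.
        proj, err := svc.Projects.Get("my-project-123").Context(ctx).Do()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(proj.ProjectId, proj.LifecycleState)

        // GetAncestry takes the same projectId plus a request body.
        anc, err := svc.Projects.GetAncestry("my-project-123", &cloudresourcemanager.GetAncestryRequest{}).Context(ctx).Do()
        if err != nil {
            log.Fatal(err)
        }
        for _, a := range anc.Ancestor {
            fmt.Printf("%s/%s\n", a.ResourceId.Type, a.ResourceId.Id)
        }
    }
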
func (r *ProjectsService) Undelete(projectId string, undeleteprojectrequest *UndeleteProjectRequest) *ProjectsUndeleteCall { c := &ProjectsUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -7905,7 +7986,7 @@ func (c *ProjectsUndeleteCall) Header() http.Header { func (c *ProjectsUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8012,6 +8093,8 @@ type ProjectsUpdateCall struct { // Update: Updates the attributes of the Project identified by the // specified `project_id` (for example, `my-project-123`). The caller // must have modify permissions for this Project. +// +// - projectId: The project ID (for example, `my-project-123`). func (r *ProjectsService) Update(projectId string, project *Project) *ProjectsUpdateCall { c := &ProjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -8046,7 +8129,7 @@ func (c *ProjectsUpdateCall) Header() http.Header { func (c *ProjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json index 0075e7224cc..1970d8fee71 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -3,7 +3,7 @@ "oauth2": { "scopes": { "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" + "description": "See, edit, configure, and delete your Google Cloud Platform data" }, "https://www.googleapis.com/auth/compute": { "description": "View and manage your Google Compute Engine resources" @@ -29,7 +29,7 @@ "description": "Creates and runs virtual machines on Google Cloud Platform.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/compute/docs/reference/latest/", - "etag": "\"-2NioU2H8y8siEzrBOV_qzRI6kQ/mIZBtT5XI04WAMcpQOOpJwzHbMk\"", + "etag": "\"uWj2hSb4GVjzdDlAnRd2gbM1ZQ8/hFkNRjc1rr89Q6jONgNcfqWVWho\"", "icons": { "x16": "https://www.google.com/images/icons/product/compute_engine-16.png", "x32": "https://www.google.com/images/icons/product/compute_engine-32.png" @@ -132,7 +132,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -229,7 +229,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" }, @@ -299,7 +299,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -490,7 +490,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -553,7 +553,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -737,7 +737,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" }, @@ -1085,7 +1085,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -1270,7 +1270,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -1509,7 +1509,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -1566,7 +1566,7 @@ ] }, "setSecurityPolicy": { - "description": "Sets the security policy for the specified backend service.", + "description": "Sets the Google Cloud Armor security policy for the specified backend service. For more information, see Google Cloud Armor Overview", "httpMethod": "POST", "id": "compute.backendServices.setSecurityPolicy", "parameterOrder": [ @@ -1694,7 +1694,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -1791,7 +1791,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" }, @@ -1910,7 +1910,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -2200,7 +2200,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" }, @@ -2606,7 +2606,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -3017,7 +3017,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -3435,7 +3435,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -3580,7 +3580,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -3771,7 +3771,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" } @@ -4081,7 +4081,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -4244,7 +4244,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -4603,7 +4603,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -4664,7 +4664,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -4727,7 +4727,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -4845,7 +4845,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -4990,7 +4990,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" } @@ -5007,6 +5007,210 @@ } } }, + "globalPublicDelegatedPrefixes": { + "methods": { + "delete": { + "description": "Deletes the specified global PublicDelegatedPrefix.", + "httpMethod": "DELETE", + "id": "compute.globalPublicDelegatedPrefixes.delete", + "parameterOrder": [ + "project", + "publicDelegatedPrefix" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "publicDelegatedPrefix": { + "description": "Name of the PublicDelegatedPrefix resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified global PublicDelegatedPrefix resource.", + "httpMethod": "GET", + "id": "compute.globalPublicDelegatedPrefixes.get", + "parameterOrder": [ + "project", + "publicDelegatedPrefix" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "publicDelegatedPrefix": { + "description": "Name of the PublicDelegatedPrefix resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}", + "response": { + "$ref": "PublicDelegatedPrefix" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a global PublicDelegatedPrefix in the specified project using the parameters that are included in the request.", + "httpMethod": "POST", + "id": "compute.globalPublicDelegatedPrefixes.insert", + "parameterOrder": [ + "project" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, 
+ "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/publicDelegatedPrefixes", + "request": { + "$ref": "PublicDelegatedPrefix" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Lists the global PublicDelegatedPrefixes for a project.", + "httpMethod": "GET", + "id": "compute.globalPublicDelegatedPrefixes.list", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/global/publicDelegatedPrefixes", + "response": { + "$ref": "PublicDelegatedPrefixList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "description": "Patches the specified global PublicDelegatedPrefix resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + "httpMethod": "PATCH", + "id": "compute.globalPublicDelegatedPrefixes.patch", + "parameterOrder": [ + "project", + "publicDelegatedPrefix" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "publicDelegatedPrefix": { + "description": "Name of the PublicDelegatedPrefix resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}", + "request": { + "$ref": "PublicDelegatedPrefix" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, "healthChecks": { "methods": { "aggregatedList": { @@ -5053,7 +5257,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -5212,7 +5416,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -5457,7 +5661,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -5702,7 +5906,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -6070,7 +6274,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -6330,7 +6534,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -6687,7 +6891,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" }, @@ -6755,7 +6959,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" }, @@ -6823,7 +7027,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" }, @@ -6891,7 +7095,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" }, @@ -7007,7 +7211,7 @@ ] }, "recreateInstances": { - "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + "description": "Flags the specified VM instances in the managed instance group to be immediately recreated. Each instance is recreated using the group's current configuration. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of each instance by checking its currentAction field; for more information, see Checking the status of managed instances.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", "httpMethod": "POST", "id": "compute.instanceGroupManagers.recreateInstances", "parameterOrder": [ @@ -7341,7 +7545,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -7520,7 +7724,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" }, @@ -7588,7 +7792,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" }, @@ -7894,7 +8098,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -8135,7 +8339,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -8204,83 +8408,15 @@ "https://www.googleapis.com/auth/compute" ] }, - "delete": { - "description": "Deletes the specified Instance resource. For more information, see Stopping or Deleting an Instance.", - "httpMethod": "DELETE", - "id": "compute.instances.delete", - "parameterOrder": [ - "project", - "zone", - "instance" - ], - "parameters": { - "instance": { - "description": "Name of the instance resource to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "zone": { - "description": "The name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "projects/{project}/zones/{zone}/instances/{instance}", - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "deleteAccessConfig": { - "description": "Deletes an access config from an instance's network interface.", + "bulkInsert": { + "description": "Creates multiple instances. 
Count specifies the number of instances to create.", "httpMethod": "POST", - "id": "compute.instances.deleteAccessConfig", + "id": "compute.instances.bulkInsert", "parameterOrder": [ "project", - "zone", - "instance", - "accessConfig", - "networkInterface" + "zone" ], "parameters": { - "accessConfig": { - "description": "The name of the access config to delete.", - "location": "query", - "required": true, - "type": "string" - }, - "instance": { - "description": "The instance name for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, - "networkInterface": { - "description": "The name of the network interface.", - "location": "query", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -8301,7 +8437,10 @@ "type": "string" } }, - "path": "projects/{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", + "path": "projects/{project}/zones/{zone}/instances/bulkInsert", + "request": { + "$ref": "BulkInsertInstanceResource" + }, "response": { "$ref": "Operation" }, @@ -8310,25 +8449,131 @@ "https://www.googleapis.com/auth/compute" ] }, - "detachDisk": { - "description": "Detaches a disk from an instance.", - "httpMethod": "POST", - "id": "compute.instances.detachDisk", + "delete": { + "description": "Deletes the specified Instance resource. For more information, see Deleting an instance.", + "httpMethod": "DELETE", + "id": "compute.instances.delete", "parameterOrder": [ "project", "zone", - "instance", - "deviceName" + "instance" ], "parameters": { - "deviceName": { - "description": "The device name of the disk to detach. Make a get() request on the instance to view currently attached disks and device names.", - "location": "query", - "required": true, - "type": "string" - }, "instance": { - "description": "Instance name for this request.", + "description": "Name of the instance resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/instances/{instance}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "deleteAccessConfig": { + "description": "Deletes an access config from an instance's network interface.", + "httpMethod": "POST", + "id": "compute.instances.deleteAccessConfig", + "parameterOrder": [ + "project", + "zone", + "instance", + "accessConfig", + "networkInterface" + ], + "parameters": { + "accessConfig": { + "description": "The name of the access config to delete.", + "location": "query", + "required": true, + "type": "string" + }, + "instance": { + "description": "The instance name for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "networkInterface": { + "description": "The name of the network interface.", + "location": "query", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "detachDisk": { + "description": "Detaches a disk from an instance.", + "httpMethod": "POST", + "id": "compute.instances.detachDisk", + "parameterOrder": [ + "project", + "zone", + "instance", + "deviceName" + ], + "parameters": { + "deviceName": { + "description": "The device name of the disk to detach. 
Make a get() request on the instance to view currently attached disks and device names.", + "location": "query", + "required": true, + "type": "string" + }, + "instance": { + "description": "Instance name for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -8405,6 +8650,55 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "getEffectiveFirewalls": { + "description": "Returns effective firewalls applied to an interface of the instance.", + "httpMethod": "GET", + "id": "compute.instances.getEffectiveFirewalls", + "parameterOrder": [ + "project", + "zone", + "instance", + "networkInterface" + ], + "parameters": { + "instance": { + "description": "Name of the instance scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "networkInterface": { + "description": "The name of the network interface to get the effective firewalls.", + "location": "query", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/instances/{instance}/getEffectiveFirewalls", + "response": { + "$ref": "InstancesGetEffectiveFirewallsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "getGuestAttributes": { "description": "Returns the specified guest attributes entry.", "httpMethod": "GET", @@ -8732,7 +9026,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" }, @@ -8802,7 +9096,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" }, @@ -10085,7 +10379,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -10281,7 +10575,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -10422,7 +10716,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -10619,7 +10913,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -10941,7 +11235,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -11077,7 +11371,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -11174,7 +11468,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" }, @@ -11244,7 +11538,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -11517,7 +11811,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" }, @@ -11585,7 +11879,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" }, @@ -11771,6 +12065,40 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "getEffectiveFirewalls": { + "description": "Returns the effective firewalls on a given network.", + "httpMethod": "GET", + "id": "compute.networks.getEffectiveFirewalls", + "parameterOrder": [ + "project", + "network" + ], + "parameters": { + "network": { + "description": "Name of the network for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/networks/{network}/getEffectiveFirewalls", + "response": { + "$ref": "NetworksGetEffectiveFirewallsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "insert": { "description": "Creates a network in the specified project using the data included in the request.", "httpMethod": "POST", @@ -11843,7 +12171,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -11928,7 +12256,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -12201,7 +12529,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -12490,7 +12818,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" }, @@ -12560,7 +12888,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" }, @@ -12817,7 +13145,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -13056,7 +13384,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -13208,7 +13536,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -13305,7 +13633,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" }, @@ -13375,7 +13703,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -13566,7 +13894,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -13895,7 +14223,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -13948,7 +14276,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" } @@ -14135,25 +14463,17 @@ } } }, - "regionAutoscalers": { + "publicAdvertisedPrefixes": { "methods": { "delete": { - "description": "Deletes the specified autoscaler.", + "description": "Deletes the specified PublicAdvertisedPrefix", "httpMethod": "DELETE", - "id": "compute.regionAutoscalers.delete", + "id": "compute.publicAdvertisedPrefixes.delete", "parameterOrder": [ "project", - "region", - "autoscaler" + "publicAdvertisedPrefix" ], "parameters": { - "autoscaler": { - "description": "Name of the autoscaler to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -14161,10 +14481,10 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", + "publicAdvertisedPrefix": { + "description": "Name of the PublicAdvertisedPrefix resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -14174,7 +14494,7 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/autoscalers/{autoscaler}", + "path": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", "response": { "$ref": "Operation" }, @@ -14184,22 +14504,14 @@ ] }, "get": { - "description": "Returns the specified autoscaler.", + "description": "Returns the specified PublicAdvertisedPrefix resource.", "httpMethod": "GET", - "id": "compute.regionAutoscalers.get", + "id": "compute.publicAdvertisedPrefixes.get", "parameterOrder": [ "project", - "region", - "autoscaler" + "publicAdvertisedPrefix" ], "parameters": { - "autoscaler": { - "description": "Name of the autoscaler to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -14207,17 +14519,17 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", + "publicAdvertisedPrefix": { + "description": "Name of the PublicAdvertisedPrefix resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/autoscalers/{autoscaler}", + "path": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", "response": { - "$ref": "Autoscaler" + "$ref": "PublicAdvertisedPrefix" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -14226,12 +14538,11 @@ ] }, "insert": { - "description": "Creates an autoscaler in the specified project using the data included in the request.", + "description": "Creates a PublicAdvertisedPrefix in the specified project using the parameters that are included in the request.", "httpMethod": "POST", - "id": "compute.regionAutoscalers.insert", + "id": "compute.publicAdvertisedPrefixes.insert", "parameterOrder": [ - "project", - "region" + "project" ], "parameters": { "project": { @@ -14241,22 +14552,15 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": 
"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" } }, - "path": "projects/{project}/regions/{region}/autoscalers", + "path": "projects/{project}/global/publicAdvertisedPrefixes", "request": { - "$ref": "Autoscaler" + "$ref": "PublicAdvertisedPrefix" }, "response": { "$ref": "Operation" @@ -14267,12 +14571,11 @@ ] }, "list": { - "description": "Retrieves a list of autoscalers contained within the specified region.", + "description": "Lists the PublicAdvertisedPrefixes for a project.", "httpMethod": "GET", - "id": "compute.regionAutoscalers.list", + "id": "compute.publicAdvertisedPrefixes.list", "parameterOrder": [ - "project", - "region" + "project" ], "parameters": { "filter": { @@ -14305,22 +14608,15 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/autoscalers", + "path": "projects/{project}/global/publicAdvertisedPrefixes", "response": { - "$ref": "RegionAutoscalerList" + "$ref": "PublicAdvertisedPrefixList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -14329,20 +14625,14 @@ ] }, "patch": { - "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Patches the specified Router resource with the data included in the request. 
This method supports PATCH semantics and uses JSON merge patch format and processing rules.", "httpMethod": "PATCH", - "id": "compute.regionAutoscalers.patch", + "id": "compute.publicAdvertisedPrefixes.patch", "parameterOrder": [ "project", - "region" + "publicAdvertisedPrefix" ], "parameters": { - "autoscaler": { - "description": "Name of the autoscaler to patch.", - "location": "query", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -14350,10 +14640,10 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", + "publicAdvertisedPrefix": { + "description": "Name of the PublicAdvertisedPrefix resource to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -14363,9 +14653,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/autoscalers", + "path": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", "request": { - "$ref": "Autoscaler" + "$ref": "PublicAdvertisedPrefix" }, "response": { "$ref": "Operation" @@ -14374,84 +14664,96 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, - "update": { - "description": "Updates an autoscaler in the specified project using the data included in the request.", - "httpMethod": "PUT", - "id": "compute.regionAutoscalers.update", + } + } + }, + "publicDelegatedPrefixes": { + "methods": { + "aggregatedList": { + "description": "Lists all PublicDelegatedPrefix resources owned by the specific project across all scopes.", + "httpMethod": "GET", + "id": "compute.publicDelegatedPrefixes.aggregatedList", "parameterOrder": [ - "project", - "region" + "project" ], "parameters": { - "autoscaler": { - "description": "Name of the autoscaler to update.", + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", "location": "query", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "type": "string" }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Name of the project scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", - "type": "string" + "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/autoscalers", - "request": { - "$ref": "Autoscaler" - }, + "path": "projects/{project}/aggregated/publicDelegatedPrefixes", "response": { - "$ref": "Operation" + "$ref": "PublicDelegatedPrefixAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "regionBackendServices": { - "methods": { + }, "delete": { - "description": "Deletes the specified regional BackendService resource.", + "description": "Deletes the specified PublicDelegatedPrefix in the given region.", "httpMethod": "DELETE", - "id": "compute.regionBackendServices.delete", + "id": "compute.publicDelegatedPrefixes.delete", "parameterOrder": [ "project", "region", - "backendService" + "publicDelegatedPrefix" ], "parameters": { - "backendService": { - "description": "Name of the BackendService resource to delete.", + "project": { + "description": "Project ID for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, - "project": { - "description": "Project ID for this request.", + "publicDelegatedPrefix": { + "description": "Name of the PublicDelegatedPrefix resource to delete.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region of this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -14463,7 +14765,7 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/backendServices/{backendService}", + "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", "response": { "$ref": "Operation" }, @@ -14473,22 +14775,15 @@ ] }, "get": { - "description": "Returns the specified regional BackendService resource.", + "description": "Returns the specified PublicDelegatedPrefix resource in the given region.", "httpMethod": "GET", - "id": "compute.regionBackendServices.get", + "id": "compute.publicDelegatedPrefixes.get", "parameterOrder": [ "project", "region", - "backendService" + "publicDelegatedPrefix" ], "parameters": { - "backendService": { - "description": "Name of the BackendService resource to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -14496,61 +14791,24 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", - 
"location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "projects/{project}/regions/{region}/backendServices/{backendService}", - "response": { - "$ref": "BackendService" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "getHealth": { - "description": "Gets the most recent health check results for this regional BackendService.", - "httpMethod": "POST", - "id": "compute.regionBackendServices.getHealth", - "parameterOrder": [ - "project", - "region", - "backendService" - ], - "parameters": { - "backendService": { - "description": "Name of the BackendService resource for which to get health.", + "publicDelegatedPrefix": { + "description": "Name of the PublicDelegatedPrefix resource to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "project": { - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region of this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/backendServices/{backendService}/getHealth", - "request": { - "$ref": "ResourceGroupReference" - }, + "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", "response": { - "$ref": "BackendServiceGroupHealth" + "$ref": "PublicDelegatedPrefix" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -14559,9 +14817,9 @@ ] }, "insert": { - "description": "Creates a regional BackendService resource in the specified project using the data included in the request. 
For more information, see Backend services overview.", + "description": "Creates a PublicDelegatedPrefix in the specified project in the given region using the parameters that are included in the request.", "httpMethod": "POST", - "id": "compute.regionBackendServices.insert", + "id": "compute.publicDelegatedPrefixes.insert", "parameterOrder": [ "project", "region" @@ -14575,7 +14833,7 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region of this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -14587,9 +14845,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/backendServices", + "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes", "request": { - "$ref": "BackendService" + "$ref": "PublicDelegatedPrefix" }, "response": { "$ref": "Operation" @@ -14600,9 +14858,9 @@ ] }, "list": { - "description": "Retrieves the list of regional BackendService resources available to the specified project in the given region.", + "description": "Lists the PublicDelegatedPrefixes for a project in the given region.", "httpMethod": "GET", - "id": "compute.regionBackendServices.list", + "id": "compute.publicDelegatedPrefixes.list", "parameterOrder": [ "project", "region" @@ -14639,21 +14897,21 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region of this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/backendServices", + "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes", "response": { - "$ref": "BackendServiceList" + "$ref": "PublicDelegatedPrefixList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -14662,31 +14920,31 @@ ] }, "patch": { - "description": "Updates the specified regional BackendService resource with the data included in the request. For more information, see Understanding backend services This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Patches the specified PublicDelegatedPrefix resource with the data included in the request. 
This method supports PATCH semantics and uses JSON merge patch format and processing rules.", "httpMethod": "PATCH", - "id": "compute.regionBackendServices.patch", + "id": "compute.publicDelegatedPrefixes.patch", "parameterOrder": [ "project", "region", - "backendService" + "publicDelegatedPrefix" ], "parameters": { - "backendService": { - "description": "Name of the BackendService resource to patch.", + "project": { + "description": "Project ID for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, - "project": { - "description": "Project ID for this request.", + "publicDelegatedPrefix": { + "description": "Name of the PublicDelegatedPrefix resource to patch.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -14698,9 +14956,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/backendServices/{backendService}", + "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", "request": { - "$ref": "BackendService" + "$ref": "PublicDelegatedPrefix" }, "response": { "$ref": "Operation" @@ -14709,19 +14967,23 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, - "update": { - "description": "Updates the specified regional BackendService resource with the data included in the request. For more information, see Backend services overview.", - "httpMethod": "PUT", - "id": "compute.regionBackendServices.update", + } + } + }, + "regionAutoscalers": { + "methods": { + "delete": { + "description": "Deletes the specified autoscaler.", + "httpMethod": "DELETE", + "id": "compute.regionAutoscalers.delete", "parameterOrder": [ "project", "region", - "backendService" + "autoscaler" ], "parameters": { - "backendService": { - "description": "Name of the BackendService resource to update.", + "autoscaler": { + "description": "Name of the autoscaler to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -14747,10 +15009,7 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/backendServices/{backendService}", - "request": { - "$ref": "BackendService" - }, + "path": "projects/{project}/regions/{region}/autoscalers/{autoscaler}", "response": { "$ref": "Operation" }, @@ -14758,82 +15017,19 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - } - } - }, - "regionCommitments": { - "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of commitments.", - "httpMethod": "GET", - "id": "compute.regionCommitments.aggregatedList", - "parameterOrder": [ - "project" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "includeAllScopes": { - "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - "location": "query", - "type": "boolean" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", - "location": "query", - "type": "boolean" - } - }, - "path": "projects/{project}/aggregated/commitments", - "response": { - "$ref": "CommitmentAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] }, "get": { - "description": "Returns the specified commitment resource. Gets a list of available commitments by making a list() request.", + "description": "Returns the specified autoscaler.", "httpMethod": "GET", - "id": "compute.regionCommitments.get", + "id": "compute.regionAutoscalers.get", "parameterOrder": [ "project", "region", - "commitment" + "autoscaler" ], "parameters": { - "commitment": { - "description": "Name of the commitment to return.", + "autoscaler": { + "description": "Name of the autoscaler to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -14847,16 +15043,16 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/commitments/{commitment}", + "path": "projects/{project}/regions/{region}/autoscalers/{autoscaler}", "response": { - "$ref": "Commitment" + "$ref": "Autoscaler" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -14865,9 +15061,9 @@ ] }, "insert": { - "description": "Creates a commitment in the specified project using the data included in the request.", + "description": "Creates an autoscaler in the specified project using the data included in the request.", "httpMethod": "POST", - "id": "compute.regionCommitments.insert", + "id": "compute.regionAutoscalers.insert", "parameterOrder": [ "project", "region" @@ -14881,7 +15077,7 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -14893,9 +15089,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/commitments", + "path": "projects/{project}/regions/{region}/autoscalers", "request": { - "$ref": "Commitment" + "$ref": "Autoscaler" }, "response": { "$ref": "Operation" @@ -14906,9 +15102,9 @@ ] }, "list": { - "description": "Retrieves a list of commitments contained within the specified region.", + "description": "Retrieves a list of autoscalers contained within the specified region.", "httpMethod": "GET", - "id": "compute.regionCommitments.list", + "id": "compute.regionAutoscalers.list", "parameterOrder": [ "project", "region" @@ -14945,67 +15141,21 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/commitments", - "response": { - "$ref": "CommitmentList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "regionDiskTypes": { - "methods": { - "get": { - "description": "Returns the specified regional disk type. Gets a list of available disk types by making a list() request.", - "httpMethod": "GET", - "id": "compute.regionDiskTypes.get", - "parameterOrder": [ - "project", - "region", - "diskType" - ], - "parameters": { - "diskType": { - "description": "Name of the disk type to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "The name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "projects/{project}/regions/{region}/diskTypes/{diskType}", + "path": "projects/{project}/regions/{region}/autoscalers", "response": { - "$ref": "DiskType" + "$ref": "RegionAutoscalerList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -15013,87 +15163,19 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "list": { - "description": "Retrieves a list of regional disk types available to the specified project.", - "httpMethod": "GET", - "id": "compute.regionDiskTypes.list", + "patch": { + "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "httpMethod": "PATCH", + "id": "compute.regionAutoscalers.patch", "parameterOrder": [ "project", "region" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "The name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "autoscaler": { + "description": "Name of the autoscaler to patch.", "location": "query", - "type": "boolean" - } - }, - "path": "projects/{project}/regions/{region}/diskTypes", - "response": { - "$ref": "RegionDiskTypeList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "regionDisks": { - "methods": { - "addResourcePolicies": { - "description": "Adds existing resource policies to a regional disk. 
You can only add one policy which will be applied to this disk for scheduling snapshot creation.", - "httpMethod": "POST", - "id": "compute.regionDisks.addResourcePolicies", - "parameterOrder": [ - "project", - "region", - "disk" - ], - "parameters": { - "disk": { - "description": "The disk name for this request.", - "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, "type": "string" }, "project": { @@ -15104,7 +15186,7 @@ "type": "string" }, "region": { - "description": "The name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -15116,9 +15198,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/disks/{disk}/addResourcePolicies", + "path": "projects/{project}/regions/{region}/autoscalers", "request": { - "$ref": "RegionDisksAddResourcePoliciesRequest" + "$ref": "Autoscaler" }, "response": { "$ref": "Operation" @@ -15128,21 +15210,19 @@ "https://www.googleapis.com/auth/compute" ] }, - "createSnapshot": { - "description": "Creates a snapshot of this regional disk.", - "httpMethod": "POST", - "id": "compute.regionDisks.createSnapshot", + "update": { + "description": "Updates an autoscaler in the specified project using the data included in the request.", + "httpMethod": "PUT", + "id": "compute.regionAutoscalers.update", "parameterOrder": [ "project", - "region", - "disk" + "region" ], "parameters": { - "disk": { - "description": "Name of the regional persistent disk to snapshot.", - "location": "path", + "autoscaler": { + "description": "Name of the autoscaler to update.", + "location": "query", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, "type": "string" }, "project": { @@ -15153,7 +15233,7 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -15165,9 +15245,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/disks/{disk}/createSnapshot", + "path": "projects/{project}/regions/{region}/autoscalers", "request": { - "$ref": "Snapshot" + "$ref": "Autoscaler" }, "response": { "$ref": "Operation" @@ -15176,20 +15256,25 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, + } + } + }, + "regionBackendServices": { + "methods": { "delete": { - "description": "Deletes the specified regional persistent disk. Deleting a regional disk removes all the replicas of its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. 
You must separately delete snapshots.", + "description": "Deletes the specified regional BackendService resource.", "httpMethod": "DELETE", - "id": "compute.regionDisks.delete", + "id": "compute.regionBackendServices.delete", "parameterOrder": [ "project", "region", - "disk" + "backendService" ], "parameters": { - "disk": { - "description": "Name of the regional persistent disk to delete.", + "backendService": { + "description": "Name of the BackendService resource to delete.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -15201,7 +15286,7 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -15213,7 +15298,7 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/disks/{disk}", + "path": "projects/{project}/regions/{region}/backendServices/{backendService}", "response": { "$ref": "Operation" }, @@ -15223,17 +15308,17 @@ ] }, "get": { - "description": "Returns a specified regional persistent disk.", + "description": "Returns the specified regional BackendService resource.", "httpMethod": "GET", - "id": "compute.regionDisks.get", + "id": "compute.regionBackendServices.get", "parameterOrder": [ "project", "region", - "disk" + "backendService" ], "parameters": { - "disk": { - "description": "Name of the regional persistent disk to return.", + "backendService": { + "description": "Name of the BackendService resource to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -15247,16 +15332,16 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/disks/{disk}", + "path": "projects/{project}/regions/{region}/backendServices/{backendService}", "response": { - "$ref": "Disk" + "$ref": "BackendService" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -15264,47 +15349,43 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "getIamPolicy": { - "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", - "httpMethod": "GET", - "id": "compute.regionDisks.getIamPolicy", + "getHealth": { + "description": "Gets the most recent health check results for this regional BackendService.", + "httpMethod": "POST", + "id": "compute.regionBackendServices.getHealth", "parameterOrder": [ "project", "region", - "resource" + "backendService" ], "parameters": { - "optionsRequestedPolicyVersion": { - "description": "Requested IAM Policy version.", - "format": "int32", - "location": "query", - "type": "integer" + "backendService": { + "description": "Name of the BackendService resource for which to get health.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" }, "project": { - "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, "region": { - "description": "The name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "projects/{project}/regions/{region}/disks/{resource}/getIamPolicy", + "path": "projects/{project}/regions/{region}/backendServices/{backendService}/getHealth", + "request": { + "$ref": "ResourceGroupReference" + }, "response": { - "$ref": "Policy" + "$ref": "BackendServiceGroupHealth" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -15313,9 +15394,9 @@ ] }, "insert": { - "description": "Creates a persistent regional disk in the specified project using the data included in the request.", + "description": "Creates a regional BackendService resource in the specified project using the data included in the request. For more information, see Backend services overview.", "httpMethod": "POST", - "id": "compute.regionDisks.insert", + "id": "compute.regionBackendServices.insert", "parameterOrder": [ "project", "region" @@ -15329,7 +15410,7 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -15339,16 +15420,11 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" - }, - "sourceImage": { - "description": "Source image to restore onto a disk. 
This field is optional.", - "location": "query", - "type": "string" } }, - "path": "projects/{project}/regions/{region}/disks", + "path": "projects/{project}/regions/{region}/backendServices", "request": { - "$ref": "Disk" + "$ref": "BackendService" }, "response": { "$ref": "Operation" @@ -15359,9 +15435,9 @@ ] }, "list": { - "description": "Retrieves the list of persistent disks contained within the specified region.", + "description": "Retrieves the list of regional BackendService resources available to the specified project in the given region.", "httpMethod": "GET", - "id": "compute.regionDisks.list", + "id": "compute.regionBackendServices.list", "parameterOrder": [ "project", "region" @@ -15398,21 +15474,21 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/disks", + "path": "projects/{project}/regions/{region}/backendServices", "response": { - "$ref": "DiskList" + "$ref": "BackendServiceList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -15420,18 +15496,18 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "removeResourcePolicies": { - "description": "Removes resource policies from a regional disk.", - "httpMethod": "POST", - "id": "compute.regionDisks.removeResourcePolicies", + "patch": { + "description": "Updates the specified regional BackendService resource with the data included in the request. For more information, see Understanding backend services This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "httpMethod": "PATCH", + "id": "compute.regionBackendServices.patch", "parameterOrder": [ "project", "region", - "disk" + "backendService" ], "parameters": { - "disk": { - "description": "The disk name for this request.", + "backendService": { + "description": "Name of the BackendService resource to patch.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -15445,7 +15521,7 @@ "type": "string" }, "region": { - "description": "The name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -15457,9 +15533,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/disks/{disk}/removeResourcePolicies", + "path": "projects/{project}/regions/{region}/backendServices/{backendService}", "request": { - "$ref": "RegionDisksRemoveResourcePoliciesRequest" + "$ref": "BackendService" }, "response": { "$ref": "Operation" @@ -15469,32 +15545,32 @@ "https://www.googleapis.com/auth/compute" ] }, - "resize": { - "description": "Resizes the specified regional persistent disk.", - "httpMethod": "POST", - "id": "compute.regionDisks.resize", + "update": { + "description": "Updates the specified regional BackendService resource with the data included in the request. 
For more information, see Backend services overview.", + "httpMethod": "PUT", + "id": "compute.regionBackendServices.update", "parameterOrder": [ "project", "region", - "disk" + "backendService" ], "parameters": { - "disk": { - "description": "Name of the regional persistent disk.", + "backendService": { + "description": "Name of the BackendService resource to update.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, "project": { - "description": "The project ID for this request.", + "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -15506,9 +15582,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/disks/{disk}/resize", + "path": "projects/{project}/regions/{region}/backendServices/{backendService}", "request": { - "$ref": "RegionDisksResizeRequest" + "$ref": "BackendService" }, "response": { "$ref": "Operation" @@ -15517,110 +15593,87 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, - "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - "httpMethod": "POST", - "id": "compute.regionDisks.setIamPolicy", + } + } + }, + "regionCommitments": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of commitments.", + "httpMethod": "GET", + "id": "compute.regionCommitments.aggregatedList", "parameterOrder": [ - "project", - "region", - "resource" + "project" ], "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", "type": "string" }, - "region": { - "description": "The name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/disks/{resource}/setIamPolicy", - "request": { - "$ref": "RegionSetPolicyRequest" - }, + "path": "projects/{project}/aggregated/commitments", "response": { - "$ref": "Policy" + "$ref": "CommitmentAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "setLabels": { - "description": "Sets the labels on the target regional disk.", - "httpMethod": "POST", - "id": "compute.regionDisks.setLabels", + "get": { + "description": "Returns the specified commitment resource. 
Gets a list of available commitments by making a list() request.", + "httpMethod": "GET", + "id": "compute.regionCommitments.get", "parameterOrder": [ "project", "region", - "resource" + "commitment" ], "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "The region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "resource": { - "description": "Name or id of the resource for this request.", + "commitment": { + "description": "Name of the commitment to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" - } - }, - "path": "projects/{project}/regions/{region}/disks/{resource}/setLabels", - "request": { - "$ref": "RegionSetLabelsRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", - "httpMethod": "POST", - "id": "compute.regionDisks.testIamPermissions", - "parameterOrder": [ - "project", - "region", - "resource" - ], - "parameters": { + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -15629,53 +15682,32 @@ "type": "string" }, "region": { - "description": "The name of the region for this request.", + "description": "Name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "projects/{project}/regions/{region}/disks/{resource}/testIamPermissions", - "request": { - "$ref": "TestPermissionsRequest" - }, + "path": "projects/{project}/regions/{region}/commitments/{commitment}", "response": { - "$ref": "TestPermissionsResponse" + "$ref": "Commitment" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "regionHealthCheckServices": { - "methods": { - "delete": { - "description": "Deletes the specified regional HealthCheckService.", - "httpMethod": "DELETE", - "id": "compute.regionHealthCheckServices.delete", + }, + "insert": { + 
"description": "Creates a commitment in the specified project using the data included in the request.", + "httpMethod": "POST", + "id": "compute.regionCommitments.insert", "parameterOrder": [ "project", - "region", - "healthCheckService" + "region" ], "parameters": { - "healthCheckService": { - "description": "Name of the HealthCheckService to delete. The name must be 1-63 characters long, and comply with RFC1035.", - "location": "path", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -15684,7 +15716,7 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -15696,7 +15728,10 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", + "path": "projects/{project}/regions/{region}/commitments", + "request": { + "$ref": "Commitment" + }, "response": { "$ref": "Operation" }, @@ -15705,20 +15740,36 @@ "https://www.googleapis.com/auth/compute" ] }, - "get": { - "description": "Returns the specified regional HealthCheckService resource.", + "list": { + "description": "Retrieves a list of commitments contained within the specified region.", "httpMethod": "GET", - "id": "compute.regionHealthCheckServices.get", + "id": "compute.regionCommitments.list", "parameterOrder": [ "project", - "region", - "healthCheckService" + "region" ], "parameters": { - "healthCheckService": { - "description": "Name of the HealthCheckService to update. The name must be 1-63 characters long, and comply with RFC1035.", - "location": "path", - "required": true, + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", "type": "string" }, "project": { @@ -15729,32 +15780,49 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", + "path": "projects/{project}/regions/{region}/commitments", "response": { - "$ref": "HealthCheckService" + "$ref": "CommitmentList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - }, - "insert": { - "description": "Creates a regional HealthCheckService resource in the specified project and region using the data included in the request.", - "httpMethod": "POST", - "id": "compute.regionHealthCheckServices.insert", + } + } + }, + "regionDiskTypes": { + "methods": { + "get": { + "description": "Returns the specified regional disk type. Gets a list of available disk types by making a list() request.", + "httpMethod": "GET", + "id": "compute.regionDiskTypes.get", "parameterOrder": [ "project", - "region" + "region", + "diskType" ], "parameters": { + "diskType": { + "description": "Name of the disk type to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -15763,34 +15831,27 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "The name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" } }, - "path": "projects/{project}/regions/{region}/healthCheckServices", - "request": { - "$ref": "HealthCheckService" - }, + "path": "projects/{project}/regions/{region}/diskTypes/{diskType}", "response": { - "$ref": "Operation" + "$ref": "DiskType" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, "list": { - "description": "Lists all the HealthCheckService resources that have been configured for the specified project in the given region.", + "description": "Retrieves a list of regional disk types available to the specified project.", "httpMethod": "GET", - "id": "compute.regionHealthCheckServices.list", + "id": "compute.regionDiskTypes.list", "parameterOrder": [ "project", "region" @@ -15827,41 +15888,95 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "The name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/healthCheckServices", + "path": "projects/{project}/regions/{region}/diskTypes", "response": { - "$ref": "HealthCheckServicesList" + "$ref": "RegionDiskTypeList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] + } + } + }, + "regionDisks": { + "methods": { + "addResourcePolicies": { + "description": "Adds existing resource policies to a regional disk. You can only add one policy which will be applied to this disk for scheduling snapshot creation.", + "httpMethod": "POST", + "id": "compute.regionDisks.addResourcePolicies", + "parameterOrder": [ + "project", + "region", + "disk" + ], + "parameters": { + "disk": { + "description": "The disk name for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/disks/{disk}/addResourcePolicies", + "request": { + "$ref": "RegionDisksAddResourcePoliciesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] }, - "patch": { - "description": "Updates the specified regional HealthCheckService resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - "httpMethod": "PATCH", - "id": "compute.regionHealthCheckServices.patch", + "createSnapshot": { + "description": "Creates a snapshot of this regional disk.", + "httpMethod": "POST", + "id": "compute.regionDisks.createSnapshot", "parameterOrder": [ "project", "region", - "healthCheckService" + "disk" ], "parameters": { - "healthCheckService": { - "description": "Name of the HealthCheckService to update. The name must be 1-63 characters long, and comply with RFC1035.", + "disk": { + "description": "Name of the regional persistent disk to snapshot.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -15873,7 +15988,7 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -15885,9 +16000,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", + "path": "projects/{project}/regions/{region}/disks/{disk}/createSnapshot", "request": { - "$ref": "HealthCheckService" + "$ref": "Snapshot" }, "response": { "$ref": "Operation" @@ -15896,25 +16011,20 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - } - } - }, - "regionHealthChecks": { - "methods": { + }, "delete": { - "description": "Deletes the specified HealthCheck resource.", + "description": "Deletes the specified regional persistent disk. Deleting a regional disk removes all the replicas of its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. 
You must separately delete snapshots.", "httpMethod": "DELETE", - "id": "compute.regionHealthChecks.delete", + "id": "compute.regionDisks.delete", "parameterOrder": [ "project", "region", - "healthCheck" + "disk" ], "parameters": { - "healthCheck": { - "description": "Name of the HealthCheck resource to delete.", + "disk": { + "description": "Name of the regional persistent disk to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -15926,7 +16036,7 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -15938,7 +16048,7 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", + "path": "projects/{project}/regions/{region}/disks/{disk}", "response": { "$ref": "Operation" }, @@ -15948,17 +16058,17 @@ ] }, "get": { - "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", + "description": "Returns a specified regional persistent disk.", "httpMethod": "GET", - "id": "compute.regionHealthChecks.get", + "id": "compute.regionDisks.get", "parameterOrder": [ "project", "region", - "healthCheck" + "disk" ], "parameters": { - "healthCheck": { - "description": "Name of the HealthCheck resource to return.", + "disk": { + "description": "Name of the regional persistent disk to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -15972,16 +16082,64 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", + "path": "projects/{project}/regions/{region}/disks/{disk}", "response": { - "$ref": "HealthCheck" + "$ref": "Disk" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getIamPolicy": { + "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", + "httpMethod": "GET", + "id": "compute.regionDisks.getIamPolicy", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/disks/{resource}/getIamPolicy", + "response": { + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -15990,9 +16148,9 @@ ] }, "insert": { - "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", + "description": "Creates a persistent regional disk in the specified project using the data included in the request.", "httpMethod": "POST", - "id": "compute.regionHealthChecks.insert", + "id": "compute.regionDisks.insert", "parameterOrder": [ "project", "region" @@ -16006,7 +16164,7 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -16016,11 +16174,16 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + }, + "sourceImage": { + "description": "Source image to restore onto a disk. 
This field is optional.", + "location": "query", + "type": "string" } }, - "path": "projects/{project}/regions/{region}/healthChecks", + "path": "projects/{project}/regions/{region}/disks", "request": { - "$ref": "HealthCheck" + "$ref": "Disk" }, "response": { "$ref": "Operation" @@ -16031,9 +16194,9 @@ ] }, "list": { - "description": "Retrieves the list of HealthCheck resources available to the specified project.", + "description": "Retrieves the list of persistent disks contained within the specified region.", "httpMethod": "GET", - "id": "compute.regionHealthChecks.list", + "id": "compute.regionDisks.list", "parameterOrder": [ "project", "region" @@ -16070,21 +16233,21 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/healthChecks", + "path": "projects/{project}/regions/{region}/disks", "response": { - "$ref": "HealthCheckList" + "$ref": "DiskList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -16092,18 +16255,18 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - "httpMethod": "PATCH", - "id": "compute.regionHealthChecks.patch", + "removeResourcePolicies": { + "description": "Removes resource policies from a regional disk.", + "httpMethod": "POST", + "id": "compute.regionDisks.removeResourcePolicies", "parameterOrder": [ "project", "region", - "healthCheck" + "disk" ], "parameters": { - "healthCheck": { - "description": "Name of the HealthCheck resource to patch.", + "disk": { + "description": "The disk name for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -16117,7 +16280,7 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "The name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -16129,9 +16292,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", + "path": "projects/{project}/regions/{region}/disks/{disk}/removeResourcePolicies", "request": { - "$ref": "HealthCheck" + "$ref": "RegionDisksRemoveResourcePoliciesRequest" }, "response": { "$ref": "Operation" @@ -16141,32 +16304,32 @@ "https://www.googleapis.com/auth/compute" ] }, - "update": { - "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", - "httpMethod": "PUT", - "id": "compute.regionHealthChecks.update", + "resize": { + "description": "Resizes the specified regional persistent disk.", + "httpMethod": "POST", + "id": "compute.regionDisks.resize", "parameterOrder": [ "project", "region", - "healthCheck" + "disk" ], "parameters": { - 
"healthCheck": { - "description": "Name of the HealthCheck resource to update.", + "disk": { + "description": "Name of the regional persistent disk.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, "project": { - "description": "Project ID for this request.", + "description": "The project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -16178,9 +16341,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", + "path": "projects/{project}/regions/{region}/disks/{disk}/resize", "request": { - "$ref": "HealthCheck" + "$ref": "RegionDisksResizeRequest" }, "response": { "$ref": "Operation" @@ -16189,27 +16352,17 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - } - } - }, - "regionInstanceGroupManagers": { - "methods": { - "abandonInstances": { - "description": "Flags the specified instances to be immediately removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + }, + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.abandonInstances", + "id": "compute.regionDisks.setIamPolicy", "parameterOrder": [ "project", "region", - "instanceGroupManager" + "resource" ], "parameters": { - "instanceGroupManager": { - "description": "Name of the managed instance group.", - "location": "path", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -16218,45 +16371,42 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "The name of the region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + "path": "projects/{project}/regions/{region}/disks/{resource}/setIamPolicy", "request": { - "$ref": "RegionInstanceGroupManagersAbandonInstancesRequest" + "$ref": "RegionSetPolicyRequest" }, "response": { - "$ref": "Operation" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] }, - "applyUpdatesToInstances": { - "description": "Apply updates to selected instances the managed instance group.", + "setLabels": { + "description": "Sets the labels on the target regional disk.", "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.applyUpdatesToInstances", + "id": "compute.regionDisks.setLabels", "parameterOrder": [ "project", "region", - "instanceGroupManager" + "resource" ], "parameters": { - "instanceGroupManager": { - "description": "The name of the managed instance group, should conform to RFC1035.", - "location": "path", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -16265,15 +16415,28 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request, should conform to RFC1035.", + "description": "The region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", + "path": "projects/{project}/regions/{region}/disks/{resource}/setLabels", "request": { - "$ref": "RegionInstanceGroupManagersApplyUpdatesRequest" + "$ref": "RegionSetLabelsRequest" }, "response": { "$ref": "Operation" @@ -16283,22 +16446,16 @@ "https://www.googleapis.com/auth/compute" ] }, - "createInstances": { - "description": "Creates instances with per-instance configs in this regional managed instance group. Instances are created using the current instance template. The create instances operation is marked DONE if the createInstances request is successful. The underlying actions take additional time. You must separately verify the status of the creating or actions with the listmanagedinstances method.", + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.createInstances", + "id": "compute.regionDisks.testIamPermissions", "parameterOrder": [ "project", "region", - "instanceGroupManager" + "resource" ], "parameters": { - "instanceGroupManager": { - "description": "The name of the managed instance group. It should conform to RFC1035.", - "location": "path", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -16307,41 +16464,49 @@ "type": "string" }, "region": { - "description": "The name of the region where the managed instance group is located. It should conform to RFC1035.", + "description": "The name of the region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/createInstances", + "path": "projects/{project}/regions/{region}/disks/{resource}/testIamPermissions", "request": { - "$ref": "RegionInstanceGroupManagersCreateInstancesRequest" + "$ref": "TestPermissionsRequest" }, "response": { - "$ref": "Operation" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] - }, + } + } + }, + "regionHealthCheckServices": { + "methods": { "delete": { - "description": "Deletes the specified managed instance group and all of the instances in that group.", + "description": "Deletes the specified regional HealthCheckService.", "httpMethod": "DELETE", - "id": "compute.regionInstanceGroupManagers.delete", + "id": "compute.regionHealthCheckServices.delete", "parameterOrder": [ "project", "region", - "instanceGroupManager" + "healthCheckService" ], "parameters": { - "instanceGroupManager": { - "description": "Name of the managed instance group to delete.", + "healthCheckService": { + "description": "Name of the HealthCheckService to delete. The name must be 1-63 characters long, and comply with RFC1035.", "location": "path", "required": true, "type": "string" @@ -16356,6 +16521,7 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -16365,7 +16531,7 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", "response": { "$ref": "Operation" }, @@ -16374,18 +16540,18 @@ "https://www.googleapis.com/auth/compute" ] }, - "deleteInstances": { - "description": "Flags the specified instances in the managed instance group to be immediately deleted. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. The deleteInstances operation is marked DONE if the deleteInstances request is successful. The underlying actions take additional time. 
You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", - "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.deleteInstances", + "get": { + "description": "Returns the specified regional HealthCheckService resource.", + "httpMethod": "GET", + "id": "compute.regionHealthCheckServices.get", "parameterOrder": [ "project", "region", - "instanceGroupManager" + "healthCheckService" ], "parameters": { - "instanceGroupManager": { - "description": "Name of the managed instance group.", + "healthCheckService": { + "description": "Name of the HealthCheckService to update. The name must be 1-63 characters long, and comply with RFC1035.", "location": "path", "required": true, "type": "string" @@ -16400,43 +16566,30 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", - "request": { - "$ref": "RegionInstanceGroupManagersDeleteInstancesRequest" - }, + "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", "response": { - "$ref": "Operation" + "$ref": "HealthCheckService" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "deletePerInstanceConfigs": { - "description": "Deletes selected per-instance configs for the managed instance group.", + "insert": { + "description": "Creates a regional HealthCheckService resource in the specified project and region using the data included in the request.", "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.deletePerInstanceConfigs", + "id": "compute.regionHealthCheckServices.insert", "parameterOrder": [ "project", - "region", - "instanceGroupManager" + "region" ], "parameters": { - "instanceGroupManager": { - "description": "The name of the managed instance group. 
It should conform to RFC1035.", - "location": "path", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -16445,15 +16598,21 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request, should conform to RFC1035.", + "description": "Name of the region scoping this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs", + "path": "projects/{project}/regions/{region}/healthCheckServices", "request": { - "$ref": "RegionInstanceGroupManagerDeleteInstanceConfigReq" + "$ref": "HealthCheckService" }, "response": { "$ref": "Operation" @@ -16463,20 +16622,36 @@ "https://www.googleapis.com/auth/compute" ] }, - "get": { - "description": "Returns all of the details about the specified managed instance group.", + "list": { + "description": "Lists all the HealthCheckService resources that have been configured for the specified project in the given region.", "httpMethod": "GET", - "id": "compute.regionInstanceGroupManagers.get", + "id": "compute.regionHealthCheckServices.list", "parameterOrder": [ "project", - "region", - "instanceGroupManager" + "region" ], "parameters": { - "instanceGroupManager": { - "description": "Name of the managed instance group to return.", - "location": "path", - "required": true, + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", "type": "string" }, "project": { @@ -16489,13 +16664,19 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + "path": "projects/{project}/regions/{region}/healthCheckServices", "response": { - "$ref": "InstanceGroupManager" + "$ref": "HealthCheckServicesList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -16503,15 +16684,22 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "insert": { - "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA regional managed instance group can contain up to 2000 instances.", - "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.insert", + "patch": { + "description": "Updates the specified regional HealthCheckService resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "httpMethod": "PATCH", + "id": "compute.regionHealthCheckServices.patch", "parameterOrder": [ "project", - "region" + "region", + "healthCheckService" ], "parameters": { + "healthCheckService": { + "description": "Name of the HealthCheckService to update. 
The name must be 1-63 characters long, and comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -16522,6 +16710,7 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -16531,9 +16720,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers", + "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", "request": { - "$ref": "InstanceGroupManager" + "$ref": "HealthCheckService" }, "response": { "$ref": "Operation" @@ -16542,37 +16731,26 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, - "list": { - "description": "Retrieves the list of managed instance groups that are contained within the specified region.", - "httpMethod": "GET", - "id": "compute.regionInstanceGroupManagers.list", + } + } + }, + "regionHealthChecks": { + "methods": { + "delete": { + "description": "Deletes the specified HealthCheck resource.", + "httpMethod": "DELETE", + "id": "compute.regionHealthChecks.delete", "parameterOrder": [ "project", - "region" + "region", + "healthCheck" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", + "healthCheck": { + "description": "Name of the HealthCheck resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" }, "project": { @@ -16585,64 +16763,42 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", - "type": "boolean" + "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers", + "path": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", "response": { - "$ref": "RegionInstanceGroupManagerList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "listErrors": { - "description": "Lists all errors thrown by actions on instances for a given regional managed instance group. The filter and orderBy query parameters are not supported.", + "get": { + "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", "httpMethod": "GET", - "id": "compute.regionInstanceGroupManagers.listErrors", + "id": "compute.regionHealthChecks.get", "parameterOrder": [ "project", "region", - "instanceGroupManager" + "healthCheck" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "instanceGroupManager": { - "description": "The name of the managed instance group. It must be a string that meets the requirements in RFC1035, or an unsigned long integer: must match regexp pattern: (?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)|[1-9][0-9]{0,19}.", + "healthCheck": { + "description": "Name of the HealthCheck resource to return.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -16651,20 +16807,16 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request. This should conform to RFC1035.", + "description": "Name of the region scoping this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", - "location": "query", - "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listErrors", + "path": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", "response": { - "$ref": "RegionInstanceGroupManagersListErrorsResponse" + "$ref": "HealthCheck" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -16672,45 +16824,15 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "listManagedInstances": { - "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. 
The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported.", + "insert": { + "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.listManagedInstances", + "id": "compute.regionHealthChecks.insert", "parameterOrder": [ "project", - "region", - "instanceGroupManager" + "region" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "instanceGroupManager": { - "description": "The name of the managed instance group.", - "location": "path", - "required": true, - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -16721,33 +16843,35 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", - "type": "boolean" + "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + "path": "projects/{project}/regions/{region}/healthChecks", + "request": { + "$ref": "HealthCheck" + }, "response": { - "$ref": "RegionInstanceGroupManagersListInstancesResponse" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "listPerInstanceConfigs": { - "description": "Lists all of the per-instance configs defined for the managed instance group. The orderBy query parameter is not supported.", - "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.listPerInstanceConfigs", + "list": { + "description": "Retrieves the list of HealthCheck resources available to the specified project.", + "httpMethod": "GET", + "id": "compute.regionHealthChecks.list", "parameterOrder": [ "project", - "region", - "instanceGroupManager" + "region" ], "parameters": { "filter": { @@ -16755,12 +16879,6 @@ "location": "query", "type": "string" }, - "instanceGroupManager": { - "description": "The name of the managed instance group. It should conform to RFC1035.", - "location": "path", - "required": true, - "type": "string" - }, "maxResults": { "default": "500", "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", @@ -16787,20 +16905,21 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request, should conform to RFC1035.", + "description": "Name of the region scoping this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", + "path": "projects/{project}/regions/{region}/healthChecks", "response": { - "$ref": "RegionInstanceGroupManagersListInstanceConfigsResp" + "$ref": "HealthCheckList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -16809,18 +16928,19 @@ ] }, "patch": { - "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", - "id": "compute.regionInstanceGroupManagers.patch", + "id": "compute.regionHealthChecks.patch", "parameterOrder": [ "project", "region", - "instanceGroupManager" + "healthCheck" ], "parameters": { - "instanceGroupManager": { - "description": "The name of the instance group manager.", + "healthCheck": { + "description": "Name of the HealthCheck resource to patch.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -16834,6 +16954,7 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -16843,9 +16964,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + "path": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", "request": { - "$ref": "InstanceGroupManager" + "$ref": "HealthCheck" }, "response": { "$ref": "Operation" @@ -16855,19 +16976,20 @@ "https://www.googleapis.com/auth/compute" ] }, - "patchPerInstanceConfigs": { - "description": "Inserts or patches per-instance configs for the managed instance group. 
perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", - "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.patchPerInstanceConfigs", + "update": { + "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", + "httpMethod": "PUT", + "id": "compute.regionHealthChecks.update", "parameterOrder": [ "project", "region", - "instanceGroupManager" + "healthCheck" ], "parameters": { - "instanceGroupManager": { - "description": "The name of the managed instance group. It should conform to RFC1035.", + "healthCheck": { + "description": "Name of the HealthCheck resource to update.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -16879,8 +17001,9 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request, should conform to RFC1035.", + "description": "Name of the region scoping this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -16890,9 +17013,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", + "path": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", "request": { - "$ref": "RegionInstanceGroupManagerPatchInstanceConfigReq" + "$ref": "HealthCheck" }, "response": { "$ref": "Operation" @@ -16901,11 +17024,15 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, - "recreateInstances": { - "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + } + } + }, + "regionInstanceGroupManagers": { + "methods": { + "abandonInstances": { + "description": "Flags the specified instances to be immediately removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. 
You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.recreateInstances", + "id": "compute.regionInstanceGroupManagers.abandonInstances", "parameterOrder": [ "project", "region", @@ -16937,9 +17064,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", "request": { - "$ref": "RegionInstanceGroupManagersRecreateRequest" + "$ref": "RegionInstanceGroupManagersAbandonInstancesRequest" }, "response": { "$ref": "Operation" @@ -16949,19 +17076,18 @@ "https://www.googleapis.com/auth/compute" ] }, - "resize": { - "description": "Changes the intended size of the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances.\n\nThe resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + "applyUpdatesToInstances": { + "description": "Apply updates to selected instances the managed instance group.", "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.resize", + "id": "compute.regionInstanceGroupManagers.applyUpdatesToInstances", "parameterOrder": [ "project", "region", - "instanceGroupManager", - "size" + "instanceGroupManager" ], "parameters": { "instanceGroupManager": { - "description": "Name of the managed instance group.", + "description": "The name of the managed instance group, should conform to RFC1035.", "location": "path", "required": true, "type": "string" @@ -16974,26 +17100,16 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region scoping this request, should conform to RFC1035.", "location": "path", "required": true, "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "size": { - "description": "Number of instances that should exist in this instance group manager.", - "format": "int32", - "location": "query", - "minimum": "0", - "required": true, - "type": "integer" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", + "request": { + "$ref": "RegionInstanceGroupManagersApplyUpdatesRequest" + }, "response": { "$ref": "Operation" }, @@ -17002,10 +17118,10 @@ "https://www.googleapis.com/auth/compute" ] }, - "setInstanceTemplate": { - "description": "Sets the instance template to use when creating new instances or recreating instances in this group. Existing instances are not affected.", + "createInstances": { + "description": "Creates instances with per-instance configs in this regional managed instance group. Instances are created using the current instance template. The create instances operation is marked DONE if the createInstances request is successful. The underlying actions take additional time. You must separately verify the status of the creating or actions with the listmanagedinstances method.", "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", + "id": "compute.regionInstanceGroupManagers.createInstances", "parameterOrder": [ "project", "region", @@ -17013,7 +17129,7 @@ ], "parameters": { "instanceGroupManager": { - "description": "The name of the managed instance group.", + "description": "The name of the managed instance group. It should conform to RFC1035.", "location": "path", "required": true, "type": "string" @@ -17026,20 +17142,20 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "The name of the region where the managed instance group is located. It should conform to RFC1035.", "location": "path", "required": true, "type": "string" }, "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/createInstances", "request": { - "$ref": "RegionInstanceGroupManagersSetTemplateRequest" + "$ref": "RegionInstanceGroupManagersCreateInstancesRequest" }, "response": { "$ref": "Operation" @@ -17049,10 +17165,10 @@ "https://www.googleapis.com/auth/compute" ] }, - "setTargetPools": { - "description": "Modifies the target pools to which all new instances in this group are assigned. Existing instances in the group are not affected.", - "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.setTargetPools", + "delete": { + "description": "Deletes the specified managed instance group and all of the instances in that group.", + "httpMethod": "DELETE", + "id": "compute.regionInstanceGroupManagers.delete", "parameterOrder": [ "project", "region", @@ -17060,7 +17176,7 @@ ], "parameters": { "instanceGroupManager": { - "description": "Name of the managed instance group.", + "description": "Name of the managed instance group to delete.", "location": "path", "required": true, "type": "string" @@ -17084,10 +17200,7 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", - "request": { - "$ref": "RegionInstanceGroupManagersSetTargetPoolsRequest" - }, + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", "response": { "$ref": "Operation" }, @@ -17096,10 +17209,10 @@ "https://www.googleapis.com/auth/compute" ] }, - "updatePerInstanceConfigs": { - "description": "Inserts or updates per-instance configs for the managed instance group. perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", + "deleteInstances": { + "description": "Flags the specified instances in the managed instance group to be immediately deleted. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. The deleteInstances operation is marked DONE if the deleteInstances request is successful. The underlying actions take additional time. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.updatePerInstanceConfigs", + "id": "compute.regionInstanceGroupManagers.deleteInstances", "parameterOrder": [ "project", "region", @@ -17107,7 +17220,7 @@ ], "parameters": { "instanceGroupManager": { - "description": "The name of the managed instance group. 
It should conform to RFC1035.", + "description": "Name of the managed instance group.", "location": "path", "required": true, "type": "string" @@ -17120,7 +17233,7 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request, should conform to RFC1035.", + "description": "Name of the region scoping this request.", "location": "path", "required": true, "type": "string" @@ -17131,9 +17244,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", "request": { - "$ref": "RegionInstanceGroupManagerUpdateInstanceConfigReq" + "$ref": "RegionInstanceGroupManagersDeleteInstancesRequest" }, "response": { "$ref": "Operation" @@ -17142,23 +17255,19 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - } - } - }, - "regionInstanceGroups": { - "methods": { - "get": { - "description": "Returns the specified instance group resource.", - "httpMethod": "GET", - "id": "compute.regionInstanceGroups.get", + }, + "deletePerInstanceConfigs": { + "description": "Deletes selected per-instance configs for the managed instance group.", + "httpMethod": "POST", + "id": "compute.regionInstanceGroupManagers.deletePerInstanceConfigs", "parameterOrder": [ "project", "region", - "instanceGroup" + "instanceGroupManager" ], "parameters": { - "instanceGroup": { - "description": "Name of the instance group resource to return.", + "instanceGroupManager": { + "description": "The name of the managed instance group. It should conform to RFC1035.", "location": "path", "required": true, "type": "string" @@ -17171,34 +17280,116 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region scoping this request, should conform to RFC1035.", "location": "path", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs", + "request": { + "$ref": "RegionInstanceGroupManagerDeleteInstanceConfigReq" + }, "response": { - "$ref": "InstanceGroup" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "list": { - "description": "Retrieves the list of instance group resources contained within the specified region.", + "get": { + "description": "Returns all of the details about the specified managed instance group.", "httpMethod": "GET", - "id": "compute.regionInstanceGroups.list", + "id": "compute.regionInstanceGroupManagers.get", "parameterOrder": [ "project", - "region" + "region", + "instanceGroupManager" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", + "instanceGroupManager": { + "description": "Name of the managed instance group to return.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + "response": { + "$ref": "InstanceGroupManager" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA regional managed instance group can contain up to 2000 instances.", + "httpMethod": "POST", + "id": "compute.regionInstanceGroupManagers.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/instanceGroupManagers", + "request": { + "$ref": "InstanceGroupManager" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of managed instance groups that are contained within the specified region.", + "httpMethod": "GET", + "id": "compute.regionInstanceGroupManagers.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", "type": "string" }, "maxResults": { @@ -17233,14 +17424,14 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/instanceGroups", + "path": "projects/{project}/regions/{region}/instanceGroupManagers", "response": { - "$ref": "RegionInstanceGroupList" + "$ref": "RegionInstanceGroupManagerList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -17248,14 +17439,14 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "listInstances": { - "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running. The orderBy query parameter is not supported.", - "httpMethod": "POST", - "id": "compute.regionInstanceGroups.listInstances", + "listErrors": { + "description": "Lists all errors thrown by actions on instances for a given regional managed instance group. 
The filter and orderBy query parameters are not supported.", + "httpMethod": "GET", + "id": "compute.regionInstanceGroupManagers.listErrors", "parameterOrder": [ "project", "region", - "instanceGroup" + "instanceGroupManager" ], "parameters": { "filter": { @@ -17263,8 +17454,8 @@ "location": "query", "type": "string" }, - "instanceGroup": { - "description": "Name of the regional instance group for which we want to list the instances.", + "instanceGroupManager": { + "description": "The name of the managed instance group. It must be a string that meets the requirements in RFC1035, or an unsigned long integer: must match regexp pattern: (?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)|[1-9][0-9]{0,19}.", "location": "path", "required": true, "type": "string" @@ -17295,23 +17486,20 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region scoping this request. This should conform to RFC1035.", "location": "path", "required": true, "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", - "request": { - "$ref": "RegionInstanceGroupsListInstancesRequest" - }, + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listErrors", "response": { - "$ref": "RegionInstanceGroupsListInstances" + "$ref": "RegionInstanceGroupManagersListErrorsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -17319,22 +17507,45 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "setNamedPorts": { - "description": "Sets the named ports for the specified regional instance group.", + "listManagedInstances": { + "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported.", "httpMethod": "POST", - "id": "compute.regionInstanceGroups.setNamedPorts", + "id": "compute.regionInstanceGroupManagers.listManagedInstances", "parameterOrder": [ "project", "region", - "instanceGroup" + "instanceGroupManager" ], "parameters": { - "instanceGroup": { - "description": "The name of the regional instance group where the named ports are updated.", + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "instanceGroupManager": { + "description": "The name of the managed instance group.", "location": "path", "required": true, "type": "string" }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -17348,44 +17559,61 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", - "type": "string" + "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", - "request": { - "$ref": "RegionInstanceGroupsSetNamedPortsRequest" - }, + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", "response": { - "$ref": "Operation" + "$ref": "RegionInstanceGroupManagersListInstancesResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "regionNetworkEndpointGroups": { - "methods": { - "delete": { - "description": "Deletes the specified network endpoint group. Note that the NEG cannot be deleted if it is configured as a backend of a backend service.", - "httpMethod": "DELETE", - "id": "compute.regionNetworkEndpointGroups.delete", + }, + "listPerInstanceConfigs": { + "description": "Lists all of the per-instance configs defined for the managed instance group. The orderBy query parameter is not supported.", + "httpMethod": "POST", + "id": "compute.regionInstanceGroupManagers.listPerInstanceConfigs", "parameterOrder": [ "project", "region", - "networkEndpointGroup" + "instanceGroupManager" ], "parameters": { - "networkEndpointGroup": { - "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "instanceGroupManager": { + "description": "The name of the managed instance group. It should conform to RFC1035.", "location": "path", "required": true, "type": "string" }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -17394,38 +17622,39 @@ "type": "string" }, "region": { - "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", + "description": "Name of the region scoping this request, should conform to RFC1035.", "location": "path", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", - "type": "string" + "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", "response": { - "$ref": "Operation" + "$ref": "RegionInstanceGroupManagersListInstanceConfigsResp" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "get": { - "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", - "httpMethod": "GET", - "id": "compute.regionNetworkEndpointGroups.get", + "patch": { + "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "httpMethod": "PATCH", + "id": "compute.regionInstanceGroupManagers.patch", "parameterOrder": [ "project", "region", - "networkEndpointGroup" + "instanceGroupManager" ], "parameters": { - "networkEndpointGroup": { - "description": "The name of the network endpoint group. It should comply with RFC1035.", + "instanceGroupManager": { + "description": "The name of the instance group manager.", "location": "path", "required": true, "type": "string" @@ -17438,31 +17667,45 @@ "type": "string" }, "region": { - "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", + "description": "Name of the region scoping this request.", "location": "path", "required": true, "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" } }, - "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + "request": { + "$ref": "InstanceGroupManager" + }, "response": { - "$ref": "NetworkEndpointGroup" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "insert": { - "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", + "patchPerInstanceConfigs": { + "description": "Inserts or patches per-instance configs for the managed instance group. perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", "httpMethod": "POST", - "id": "compute.regionNetworkEndpointGroups.insert", + "id": "compute.regionInstanceGroupManagers.patchPerInstanceConfigs", "parameterOrder": [ "project", - "region" + "region", + "instanceGroupManager" ], "parameters": { + "instanceGroupManager": { + "description": "The name of the managed instance group. It should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -17471,7 +17714,7 @@ "type": "string" }, "region": { - "description": "The name of the region where you want to create the network endpoint group. 
It should comply with RFC1035.", + "description": "Name of the region scoping this request, should conform to RFC1035.", "location": "path", "required": true, "type": "string" @@ -17482,9 +17725,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/networkEndpointGroups", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", "request": { - "$ref": "NetworkEndpointGroup" + "$ref": "RegionInstanceGroupManagerPatchInstanceConfigReq" }, "response": { "$ref": "Operation" @@ -17494,37 +17737,69 @@ "https://www.googleapis.com/auth/compute" ] }, - "list": { - "description": "Retrieves the list of regional network endpoint groups available to the specified project in the given region.", - "httpMethod": "GET", - "id": "compute.regionNetworkEndpointGroups.list", + "recreateInstances": { + "description": "Flags the specified VM instances in the managed instance group to be immediately recreated. Each instance is recreated using the group's current configuration. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of each instance by checking its currentAction field; for more information, see Checking the status of managed instances.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + "httpMethod": "POST", + "id": "compute.regionInstanceGroupManagers.recreateInstances", "parameterOrder": [ "project", - "region" + "region", + "instanceGroupManager" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", + "instanceGroupManager": { + "description": "Name of the managed instance group.", + "location": "path", + "required": true, "type": "string" }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "required": true, "type": "string" }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + "request": { + "$ref": "RegionInstanceGroupManagersRecreateRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "resize": { + "description": "Changes the intended size of the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances.\n\nThe resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. 
You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + "httpMethod": "POST", + "id": "compute.regionInstanceGroupManagers.resize", + "parameterOrder": [ + "project", + "region", + "instanceGroupManager", + "size" + ], + "parameters": { + "instanceGroupManager": { + "description": "Name of the managed instance group.", + "location": "path", + "required": true, + "type": "string" }, "project": { "description": "Project ID for this request.", @@ -17534,45 +17809,47 @@ "type": "string" }, "region": { - "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", + "description": "Name of the region scoping this request.", "location": "path", "required": true, "type": "string" }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", - "type": "boolean" + "type": "string" + }, + "size": { + "description": "Number of instances that should exist in this instance group manager.", + "format": "int32", + "location": "query", + "minimum": "0", + "required": true, + "type": "integer" } }, - "path": "projects/{project}/regions/{region}/networkEndpointGroups", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", "response": { - "$ref": "NetworkEndpointGroupList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] - } - } - }, - "regionNotificationEndpoints": { - "methods": { - "delete": { - "description": "Deletes the specified NotificationEndpoint in the given region", - "httpMethod": "DELETE", - "id": "compute.regionNotificationEndpoints.delete", + }, + "setInstanceTemplate": { + "description": "Sets the instance template to use when creating new instances or recreating instances in this group. 
Existing instances are not affected.", + "httpMethod": "POST", + "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", "parameterOrder": [ "project", "region", - "notificationEndpoint" + "instanceGroupManager" ], "parameters": { - "notificationEndpoint": { - "description": "Name of the NotificationEndpoint resource to delete.", + "instanceGroupManager": { + "description": "The name of the managed instance group.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -17586,7 +17863,6 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -17596,7 +17872,10 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + "request": { + "$ref": "RegionInstanceGroupManagersSetTemplateRequest" + }, "response": { "$ref": "Operation" }, @@ -17605,20 +17884,19 @@ "https://www.googleapis.com/auth/compute" ] }, - "get": { - "description": "Returns the specified NotificationEndpoint resource in the given region.", - "httpMethod": "GET", - "id": "compute.regionNotificationEndpoints.get", + "setTargetPools": { + "description": "Modifies the target pools to which all new instances in this group are assigned. Existing instances in the group are not affected.", + "httpMethod": "POST", + "id": "compute.regionInstanceGroupManagers.setTargetPools", "parameterOrder": [ "project", "region", - "notificationEndpoint" + "instanceGroupManager" ], "parameters": { - "notificationEndpoint": { - "description": "Name of the NotificationEndpoint resource to return.", + "instanceGroupManager": { + "description": "Name of the managed instance group.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -17632,30 +17910,43 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" } }, - "path": "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", + "request": { + "$ref": "RegionInstanceGroupManagersSetTargetPoolsRequest" + }, "response": { - "$ref": "NotificationEndpoint" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "insert": { - "description": "Create a NotificationEndpoint in the specified project in the given region using the parameters that are included in the request.", + "updatePerInstanceConfigs": { + "description": "Inserts or updates per-instance configs for the managed instance group. perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", "httpMethod": "POST", - "id": "compute.regionNotificationEndpoints.insert", + "id": "compute.regionInstanceGroupManagers.updatePerInstanceConfigs", "parameterOrder": [ "project", - "region" + "region", + "instanceGroupManager" ], "parameters": { + "instanceGroupManager": { + "description": "The name of the managed instance group. It should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -17664,9 +17955,8 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region scoping this request, should conform to RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -17676,9 +17966,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/notificationEndpoints", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", "request": { - "$ref": "NotificationEndpoint" + "$ref": "RegionInstanceGroupManagerUpdateInstanceConfigReq" }, "response": { "$ref": "Operation" @@ -17687,11 +17977,55 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] + } + } + }, + "regionInstanceGroups": { + "methods": { + "get": { + "description": "Returns the specified instance group resource.", + "httpMethod": "GET", + "id": "compute.regionInstanceGroups.get", + "parameterOrder": [ + "project", + "region", + "instanceGroup" + ], + "parameters": { + "instanceGroup": { + "description": "Name of the instance group resource to return.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}", + "response": { + "$ref": "InstanceGroup" + }, + "scopes": [ + 
"https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] }, "list": { - "description": "Lists the NotificationEndpoints for a project in the given region.", + "description": "Retrieves the list of instance group resources contained within the specified region.", "httpMethod": "GET", - "id": "compute.regionNotificationEndpoints.list", + "id": "compute.regionInstanceGroups.list", "parameterOrder": [ "project", "region" @@ -17730,83 +18064,62 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/notificationEndpoints", + "path": "projects/{project}/regions/{region}/instanceGroups", "response": { - "$ref": "NotificationEndpointList" + "$ref": "RegionInstanceGroupList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "regionOperations": { - "methods": { - "delete": { - "description": "Deletes the specified region-specific Operations resource.", - "httpMethod": "DELETE", - "id": "compute.regionOperations.delete", + }, + "listInstances": { + "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running. The orderBy query parameter is not supported.", + "httpMethod": "POST", + "id": "compute.regionInstanceGroups.listInstances", "parameterOrder": [ "project", "region", - "operation" + "instanceGroup" ], "parameters": { - "operation": { - "description": "Name of the Operations resource to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", "type": "string" }, - "project": { - "description": "Project ID for this request.", + "instanceGroup": { + "description": "Name of the regional instance group for which we want to list the instances.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", "type": "string" - } - }, - "path": "projects/{project}/regions/{region}/operations/{operation}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "description": "Retrieves the specified region-specific Operations resource.", - "httpMethod": "GET", - "id": "compute.regionOperations.get", - "parameterOrder": [ - "project", - "region", - "operation" - ], - "parameters": { - "operation": { - "description": "Name of the Operations resource to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", "type": "string" }, "project": { @@ -17817,16 +18130,23 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", + "location": "query", + "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/operations/{operation}", + "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", + "request": { + "$ref": "RegionInstanceGroupsListInstancesRequest" + }, "response": { - "$ref": "Operation" + "$ref": "RegionInstanceGroupsListInstances" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -17834,36 +18154,20 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "list": { - "description": "Retrieves a list of Operation resources contained within the specified region.", - "httpMethod": "GET", - "id": "compute.regionOperations.list", + "setNamedPorts": { + "description": "Sets the named ports for the specified regional instance group.", + "httpMethod": "POST", + "id": "compute.regionInstanceGroups.setNamedPorts", "parameterOrder": [ "project", - "region" + "region", + "instanceGroup" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", + "instanceGroup": { + "description": "The name of the regional instance group where the named ports are updated.", + "location": "path", + "required": true, "type": "string" }, "project": { @@ -17874,45 +18178,42 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", - "type": "boolean" + "type": "string" } }, - "path": "projects/{project}/regions/{region}/operations", + "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", + "request": { + "$ref": "RegionInstanceGroupsSetNamedPortsRequest" + }, "response": { - "$ref": "OperationList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] - }, - "wait": { - "description": "Waits for the specified Operation resource to return as `DONE` or for the request to approach the 2 minute deadline, and retrieves the specified Operation resource. This method differs from the `GET` method in that it waits for no more than the default deadline (2 minutes) and then returns the current state of the operation, which might be `DONE` or still in progress.\n\nThis method is called on a best-effort basis. Specifically: \n- In uncommon cases, when the server is overloaded, the request might return before the default deadline is reached, or might return after zero seconds. \n- If the default deadline is reached, there is no guarantee that the operation is actually done when the method returns. Be prepared to retry if the operation is not `DONE`.", + } + } + }, + "regionInstances": { + "methods": { + "bulkInsert": { + "description": "Creates multiple instances in a given region. 
Count specifies the number of instances to create.", "httpMethod": "POST", - "id": "compute.regionOperations.wait", + "id": "compute.regionInstances.bulkInsert", "parameterOrder": [ "project", - "region", - "operation" + "region" ], "parameters": { - "operation": { - "description": "Name of the Operations resource to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -17921,37 +18222,50 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "The name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" } }, - "path": "projects/{project}/regions/{region}/operations/{operation}/wait", + "path": "projects/{project}/regions/{region}/instances/bulkInsert", + "request": { + "$ref": "BulkInsertInstanceResource" + }, "response": { "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] } } }, - "regionSslCertificates": { + "regionNetworkEndpointGroups": { "methods": { "delete": { - "description": "Deletes the specified SslCertificate resource in the region.", + "description": "Deletes the specified network endpoint group. Note that the NEG cannot be deleted if it is configured as a backend of a backend service.", "httpMethod": "DELETE", - "id": "compute.regionSslCertificates.delete", + "id": "compute.regionNetworkEndpointGroups.delete", "parameterOrder": [ "project", "region", - "sslCertificate" + "networkEndpointGroup" ], "parameters": { + "networkEndpointGroup": { + "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -17960,9 +18274,8 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -17970,16 +18283,9 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" - }, - "sslCertificate": { - "description": "Name of the SslCertificate resource to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", + "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", "response": { "$ref": "Operation" }, @@ -17989,40 +18295,38 @@ ] }, "get": { - "description": "Returns the specified SslCertificate resource in the specified region. Get a list of available SSL certificates by making a list() request.", + "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", "httpMethod": "GET", - "id": "compute.regionSslCertificates.get", + "id": "compute.regionNetworkEndpointGroups.get", "parameterOrder": [ "project", "region", - "sslCertificate" + "networkEndpointGroup" ], "parameters": { - "project": { - "description": "Project ID for this request.", + "networkEndpointGroup": { + "description": "The name of the network endpoint group. It should comply with RFC1035.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", + "project": { + "description": "Project ID for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, - "sslCertificate": { - "description": "Name of the SslCertificate resource to return.", + "region": { + "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", + "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", "response": { - "$ref": "SslCertificate" + "$ref": "NetworkEndpointGroup" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -18031,9 +18335,9 @@ ] }, "insert": { - "description": "Creates a SslCertificate resource in the specified project and region using the data included in the request", + "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", "httpMethod": "POST", - "id": "compute.regionSslCertificates.insert", + "id": "compute.regionNetworkEndpointGroups.insert", "parameterOrder": [ "project", "region" @@ -18047,9 +18351,8 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "The name of the region where you want to create the network endpoint group. 
It should comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -18059,9 +18362,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/sslCertificates", + "path": "projects/{project}/regions/{region}/networkEndpointGroups", "request": { - "$ref": "SslCertificate" + "$ref": "NetworkEndpointGroup" }, "response": { "$ref": "Operation" @@ -18072,9 +18375,9 @@ ] }, "list": { - "description": "Retrieves the list of SslCertificate resources available to the specified project in the specified region.", + "description": "Retrieves the list of regional network endpoint groups available to the specified project in the given region.", "httpMethod": "GET", - "id": "compute.regionSslCertificates.list", + "id": "compute.regionNetworkEndpointGroups.list", "parameterOrder": [ "project", "region" @@ -18111,21 +18414,20 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/sslCertificates", + "path": "projects/{project}/regions/{region}/networkEndpointGroups", "response": { - "$ref": "SslCertificateList" + "$ref": "NetworkEndpointGroupList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -18135,18 +18437,25 @@ } } }, - "regionTargetHttpProxies": { + "regionNotificationEndpoints": { "methods": { "delete": { - "description": "Deletes the specified TargetHttpProxy resource.", + "description": "Deletes the specified NotificationEndpoint in the given region", "httpMethod": "DELETE", - "id": "compute.regionTargetHttpProxies.delete", + "id": "compute.regionNotificationEndpoints.delete", "parameterOrder": [ "project", "region", - "targetHttpProxy" + "notificationEndpoint" ], "parameters": { + "notificationEndpoint": { + "description": "Name of the NotificationEndpoint resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -18165,16 +18474,9 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" - }, - "targetHttpProxy": { - "description": "Name of the TargetHttpProxy resource to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + "path": "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", "response": { "$ref": "Operation" }, @@ -18184,15 +18486,22 @@ ] }, "get": { - "description": "Returns the specified TargetHttpProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request.", + "description": "Returns the specified NotificationEndpoint resource in the given region.", "httpMethod": "GET", - "id": "compute.regionTargetHttpProxies.get", + "id": "compute.regionNotificationEndpoints.get", "parameterOrder": [ "project", "region", - "targetHttpProxy" + "notificationEndpoint" ], "parameters": { + "notificationEndpoint": { + "description": "Name of the NotificationEndpoint resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -18206,18 +18515,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - }, - "targetHttpProxy": { - "description": "Name of the TargetHttpProxy resource to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + "path": "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", "response": { - "$ref": "TargetHttpProxy" + "$ref": "NotificationEndpoint" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -18226,9 +18528,9 @@ ] }, "insert": { - "description": "Creates a TargetHttpProxy resource in the specified project and region using the data included in the request.", + "description": "Create a NotificationEndpoint in the specified project in the given region using the parameters that are included in the request.", "httpMethod": "POST", - "id": "compute.regionTargetHttpProxies.insert", + "id": "compute.regionNotificationEndpoints.insert", "parameterOrder": [ "project", "region" @@ -18254,9 +18556,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpProxies", + "path": "projects/{project}/regions/{region}/notificationEndpoints", "request": { - "$ref": "TargetHttpProxy" + "$ref": "NotificationEndpoint" }, "response": { "$ref": "Operation" @@ -18267,9 +18569,9 @@ ] }, "list": { - "description": "Retrieves the list of TargetHttpProxy resources available to the specified project in the specified region.", + "description": "Lists the NotificationEndpoints for a project in the given region.", "httpMethod": "GET", - "id": "compute.regionTargetHttpProxies.list", + "id": "compute.regionNotificationEndpoints.list", "parameterOrder": [ "project", "region" @@ -18313,31 +18615,80 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/targetHttpProxies", + "path": "projects/{project}/regions/{region}/notificationEndpoints", "response": { - "$ref": "TargetHttpProxyList" + "$ref": "NotificationEndpointList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] + } + } + }, + "regionOperations": { + "methods": { + "delete": { + "description": "Deletes the specified region-specific Operations resource.", + "httpMethod": "DELETE", + "id": "compute.regionOperations.delete", + "parameterOrder": [ + "project", + "region", + "operation" + ], + "parameters": { + "operation": { + "description": "Name of the Operations resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/operations/{operation}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] }, - "setUrlMap": { - "description": "Changes the URL map for TargetHttpProxy.", - "httpMethod": "POST", - "id": "compute.regionTargetHttpProxies.setUrlMap", + "get": { + "description": "Retrieves the specified region-specific Operations resource.", + "httpMethod": "GET", + "id": "compute.regionOperations.get", "parameterOrder": [ "project", "region", - "targetHttpProxy" + "operation" ], "parameters": { + "operation": { + "description": "Name of the Operations resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -18346,49 +18697,139 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/operations/{operation}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "description": "Retrieves a list of Operation resources contained within the specified region.", + "httpMethod": "GET", + "id": "compute.regionOperations.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, - "targetHttpProxy": { - "description": "Name of the TargetHttpProxy to set a URL map for.", + "pageToken": { + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap", - "request": { - "$ref": "UrlMapReference" + "path": "projects/{project}/regions/{region}/operations", + "response": { + "$ref": "OperationList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "wait": { + "description": "Waits for the specified Operation resource to return as `DONE` or for the request to approach the 2 minute deadline, and retrieves the specified Operation resource. This method differs from the `GET` method in that it waits for no more than the default deadline (2 minutes) and then returns the current state of the operation, which might be `DONE` or still in progress.\n\nThis method is called on a best-effort basis. Specifically: \n- In uncommon cases, when the server is overloaded, the request might return before the default deadline is reached, or might return after zero seconds. \n- If the default deadline is reached, there is no guarantee that the operation is actually done when the method returns. 
Be prepared to retry if the operation is not `DONE`.", + "httpMethod": "POST", + "id": "compute.regionOperations.wait", + "parameterOrder": [ + "project", + "region", + "operation" + ], + "parameters": { + "operation": { + "description": "Name of the Operations resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } }, + "path": "projects/{project}/regions/{region}/operations/{operation}/wait", "response": { "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] } } }, - "regionTargetHttpsProxies": { + "regionSslCertificates": { "methods": { "delete": { - "description": "Deletes the specified TargetHttpsProxy resource.", + "description": "Deletes the specified SslCertificate resource in the region.", "httpMethod": "DELETE", - "id": "compute.regionTargetHttpsProxies.delete", + "id": "compute.regionSslCertificates.delete", "parameterOrder": [ "project", "region", - "targetHttpsProxy" + "sslCertificate" ], "parameters": { "project": { @@ -18410,15 +18851,15 @@ "location": "query", "type": "string" }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy resource to delete.", + "sslCertificate": { + "description": "Name of the SslCertificate resource to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "path": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", "response": { "$ref": "Operation" }, @@ -18428,13 +18869,13 @@ ] }, "get": { - "description": "Returns the specified TargetHttpsProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request.", + "description": "Returns the specified SslCertificate resource in the specified region. 
Get a list of available SSL certificates by making a list() request.", "httpMethod": "GET", - "id": "compute.regionTargetHttpsProxies.get", + "id": "compute.regionSslCertificates.get", "parameterOrder": [ "project", "region", - "targetHttpsProxy" + "sslCertificate" ], "parameters": { "project": { @@ -18451,17 +18892,17 @@ "required": true, "type": "string" }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy resource to return.", + "sslCertificate": { + "description": "Name of the SslCertificate resource to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "path": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", "response": { - "$ref": "TargetHttpsProxy" + "$ref": "SslCertificate" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -18470,9 +18911,9 @@ ] }, "insert": { - "description": "Creates a TargetHttpsProxy resource in the specified project and region using the data included in the request.", + "description": "Creates a SslCertificate resource in the specified project and region using the data included in the request", "httpMethod": "POST", - "id": "compute.regionTargetHttpsProxies.insert", + "id": "compute.regionSslCertificates.insert", "parameterOrder": [ "project", "region" @@ -18498,9 +18939,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpsProxies", + "path": "projects/{project}/regions/{region}/sslCertificates", "request": { - "$ref": "TargetHttpsProxy" + "$ref": "SslCertificate" }, "response": { "$ref": "Operation" @@ -18511,9 +18952,9 @@ ] }, "list": { - "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project in the specified region.", + "description": "Retrieves the list of SslCertificate resources available to the specified project in the specified region.", "httpMethod": "GET", - "id": "compute.regionTargetHttpsProxies.list", + "id": "compute.regionSslCertificates.list", "parameterOrder": [ "project", "region" @@ -18557,29 +18998,468 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/targetHttpsProxies", + "path": "projects/{project}/regions/{region}/sslCertificates", "response": { - "$ref": "TargetHttpsProxyList" + "$ref": "SslCertificateList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - }, - "setSslCertificates": { - "description": "Replaces SslCertificates for TargetHttpsProxy.", - "httpMethod": "POST", - "id": "compute.regionTargetHttpsProxies.setSslCertificates", - "parameterOrder": [ - "project", - "region", - "targetHttpsProxy" + } + } + }, + "regionTargetHttpProxies": { + "methods": { + "delete": { + "description": "Deletes the specified TargetHttpProxy resource.", + "httpMethod": "DELETE", + "id": "compute.regionTargetHttpProxies.delete", + "parameterOrder": [ + "project", + "region", + "targetHttpProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified TargetHttpProxy resource in the specified region. 
Gets a list of available target HTTP proxies by making a list() request.", + "httpMethod": "GET", + "id": "compute.regionTargetHttpProxies.get", + "parameterOrder": [ + "project", + "region", + "targetHttpProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + "response": { + "$ref": "TargetHttpProxy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a TargetHttpProxy resource in the specified project and region using the data included in the request.", + "httpMethod": "POST", + "id": "compute.regionTargetHttpProxies.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetHttpProxies", + "request": { + "$ref": "TargetHttpProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of TargetHttpProxy resources available to the specified project in the specified region.", + "httpMethod": "GET", + "id": "compute.regionTargetHttpProxies.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/targetHttpProxies", + "response": { + "$ref": "TargetHttpProxyList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setUrlMap": { + "description": "Changes the URL map for TargetHttpProxy.", + "httpMethod": "POST", + "id": "compute.regionTargetHttpProxies.setUrlMap", + "parameterOrder": [ + "project", + "region", + "targetHttpProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy to set a URL map for.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + "request": { + "$ref": "UrlMapReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "regionTargetHttpsProxies": { + "methods": { + "delete": { + "description": "Deletes the specified TargetHttpsProxy resource.", + "httpMethod": "DELETE", + "id": "compute.regionTargetHttpsProxies.delete", + "parameterOrder": [ + "project", + "region", + "targetHttpsProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified TargetHttpsProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request.", + "httpMethod": "GET", + "id": "compute.regionTargetHttpsProxies.get", + "parameterOrder": [ + "project", + "region", + "targetHttpsProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "response": { + "$ref": "TargetHttpsProxy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a TargetHttpsProxy resource in the specified project and region using the data included in the request.", + "httpMethod": "POST", + "id": "compute.regionTargetHttpsProxies.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetHttpsProxies", + "request": { + "$ref": "TargetHttpsProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project in the specified region.", + "httpMethod": "GET", + "id": "compute.regionTargetHttpsProxies.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/targetHttpsProxies", + "response": { + "$ref": "TargetHttpsProxyList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setSslCertificates": { + "description": "Replaces SslCertificates for TargetHttpsProxy.", + "httpMethod": "POST", + "id": "compute.regionTargetHttpsProxies.setSslCertificates", + "parameterOrder": [ + "project", + "region", + "targetHttpsProxy" ], "parameters": { "project": { @@ -18850,7 +19730,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -19084,7 +19964,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -19147,7 +20027,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -19379,7 +20259,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" }, @@ -19587,7 +20467,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" } @@ -19826,7 +20706,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -19978,7 +20858,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -20129,7 +21009,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" }, @@ -20281,7 +21161,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -20587,7 +21467,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -20826,7 +21706,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -20880,7 +21760,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -21171,7 +22051,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" } @@ -21343,7 +22223,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -21502,7 +22382,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -21663,7 +22543,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -21717,7 +22597,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -21820,7 +22700,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -22108,7 +22988,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -22162,7 +23042,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -22518,7 +23398,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" } @@ -22622,7 +23502,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -22781,7 +23661,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -22926,7 +23806,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -23085,7 +23965,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -23351,7 +24231,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -23535,7 +24415,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" }, @@ -23703,7 +24583,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -23939,7 +24819,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" } @@ -24255,7 +25135,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -24581,7 +25461,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -24726,7 +25606,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -24917,7 +25797,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -24980,7 +25860,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -25180,7 +26060,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -25361,7 +26241,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -25594,7 +26474,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" } @@ -25751,7 +26631,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -25942,7 +26822,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -26081,7 +26961,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" }, @@ -26222,7 +27102,7 @@ "type": "string" }, "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } @@ -26240,7 +27120,7 @@ } } }, - "revision": "20210111", + "revision": "20210322", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -26775,16 +27655,18 @@ "type": "string" }, "prefixLength": { - "description": "The prefix length if the resource reprensents an IP range.", + "description": "The prefix length if the resource represents an IP range.", "format": "int32", "type": "integer" }, "purpose": { - "description": "The purpose of this resource, which can be one of the following values: \n- `GCE_ENDPOINT` for addresses that are used by VM instances, alias IP ranges, internal load balancers, and similar resources. \n- `DNS_RESOLVER` for a DNS resolver address in a subnetwork \n- `VPC_PEERING` for addresses that are reserved for VPC peer networks. \n- `NAT_AUTO` for addresses that are external IP addresses automatically reserved for Cloud NAT. \n- `IPSEC_INTERCONNECT` for addresses created from a private IP range that are reserved for a VLAN attachment in an IPsec encrypted Interconnect configuration. These addresses are regional resources.", + "description": "The purpose of this resource, which can be one of the following values: \n- `GCE_ENDPOINT` for addresses that are used by VM instances, alias IP ranges, internal load balancers, and similar resources. \n- `DNS_RESOLVER` for a DNS resolver address in a subnetwork \n- `VPC_PEERING` for addresses that are reserved for VPC peer networks. \n- `NAT_AUTO` for addresses that are external IP addresses automatically reserved for Cloud NAT. \n- `IPSEC_INTERCONNECT` for addresses created from a private IP range that are reserved for a VLAN attachment in an IPsec-encrypted Cloud Interconnect configuration. 
These addresses are regional resources.", "enum": [ "DNS_RESOLVER", "GCE_ENDPOINT", + "IPSEC_INTERCONNECT", "NAT_AUTO", + "PRIVATE_SERVICE_CONNECT", "SHARED_LOADBALANCER_VIP", "VPC_PEERING" ], @@ -26793,12 +27675,14 @@ "", "", "", + "", + "", "" ], "type": "string" }, "region": { - "description": "[Output Only] The URL of the region where the regional address resides. This field is not applicable to global addresses. You must specify this field as part of the HTTP request URL.", + "description": "[Output Only] The URL of the region where a regional address resides. For regional addresses, you must specify the region as a path parameter in the HTTP request URL. This field is not applicable to global addresses.", "type": "string" }, "selfLink": { @@ -27237,6 +28121,10 @@ }, "type": "array" }, + "locationHint": { + "description": "An opaque location hint used to place the allocation close to other resources. This field is for use by internal tools that use the public API.", + "type": "string" + }, "machineType": { "description": "Specifies type of machine (name only) which has fixed number of vCPUs and fixed amount of memory. This also includes specifying custom machine type following custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern.", "type": "string" @@ -27411,6 +28299,11 @@ ], "type": "string" }, + "provisionedIops": { + "description": "Indicates how many IOPS must be provisioned for the disk.", + "format": "int64", + "type": "string" + }, "resourcePolicies": { "description": "Resource policies applied to this disk for automatic snapshot creations. Specified using the full or partial URL. For instance template, specify only the resource policy name.", "items": { @@ -27566,6 +28459,13 @@ "description": "[Output Only] URL of the region where the instance group resides (for autoscalers living in regional scope).", "type": "string" }, + "scalingScheduleStatus": { + "additionalProperties": { + "$ref": "ScalingScheduleStatus" + }, + "description": "[Output Only] Status information of existing scaling schedules.", + "type": "object" + }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" @@ -27868,6 +28768,8 @@ "NOT_ENOUGH_QUOTA_AVAILABLE", "REGION_RESOURCE_STOCKOUT", "SCALING_TARGET_DOES_NOT_EXIST", + "SCHEDULED_INSTANCES_GREATER_THAN_AUTOSCALER_MAX", + "SCHEDULED_INSTANCES_LESS_THAN_AUTOSCALER_MIN", "UNKNOWN", "UNSUPPORTED_MAX_RATE_LOAD_BALANCING_CONFIGURATION", "ZONE_RESOURCE_STOCKOUT" @@ -27890,6 +28792,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -28047,6 +28951,13 @@ }, "scaleInControl": { "$ref": "AutoscalingPolicyScaleInControl" + }, + "scalingSchedules": { + "additionalProperties": { + "$ref": "AutoscalingPolicyScalingSchedule" + }, + "description": "Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler, and they can overlap. During overlapping periods the greatest min_required_replicas of all scaling schedules is applied. Up to 128 scaling schedules are allowed.", + "type": "object" } }, "type": "object" @@ -28055,6 +28966,18 @@ "description": "CPU utilization policy.", "id": "AutoscalingPolicyCpuUtilization", "properties": { + "predictiveMethod": { + "description": "Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are:\n\n* NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics. * OPTIMIZE_AVAILABILITY. 
Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand.", + "enum": [ + "NONE", + "OPTIMIZE_AVAILABILITY" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, "utilizationTarget": { "description": "The target CPU utilization that the autoscaler maintains. Must be a float value in the range (0, 1]. If not specified, the default is 0.6.\n\nIf the CPU level is below the target utilization, the autoscaler scales in the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization.\n\nIf the average CPU is above the target utilization, the autoscaler scales out until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization.", "format": "double", @@ -28130,6 +29053,39 @@ }, "type": "object" }, + "AutoscalingPolicyScalingSchedule": { + "description": "Scaling based on user-defined schedule. The message describes a single scaling schedule. A scaling schedule changes the minimum number of VM instances an autoscaler can recommend, which can trigger scaling out.", + "id": "AutoscalingPolicyScalingSchedule", + "properties": { + "description": { + "description": "A description of a scaling schedule.", + "type": "string" + }, + "disabled": { + "description": "A boolean value that specifies whether a scaling schedule can influence autoscaler recommendations. If set to true, then a scaling schedule has no effect. This field is optional, and its value is false by default.", + "type": "boolean" + }, + "durationSec": { + "description": "The duration of time intervals, in seconds, for which this scaling schedule is to run. The minimum allowed value is 300. This field is required.", + "format": "int32", + "type": "integer" + }, + "minRequiredReplicas": { + "description": "The minimum number of VM instances that the autoscaler will recommend in time intervals starting according to schedule. This field is required.", + "format": "int32", + "type": "integer" + }, + "schedule": { + "description": "The start timestamps of time intervals when this scaling schedule is to provide a scaling signal. This field uses the extended cron format (with an optional year field). The expression can describe a single timestamp if the optional year is set, in which case the scaling schedule runs once. The schedule is interpreted with respect to time_zone. This field is required. Note: These timestamps only describe when autoscaler starts providing the scaling signal. The VMs need additional time to become serving.", + "type": "string" + }, + "timeZone": { + "description": "The time zone to use when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database. This field is assigned a default value of ?UTC? if left empty.", + "type": "string" + } + }, + "type": "object" + }, "Backend": { "description": "Message containing information of one individual backend.", "id": "Backend", @@ -28259,6 +29215,13 @@ "description": "Message containing Cloud CDN configuration for a backend bucket.", "id": "BackendBucketCdnPolicy", "properties": { + "bypassCacheOnRequestHeaders": { + "description": "Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. 
The cache is bypassed for all cdnPolicy.cacheMode settings.", + "items": { + "$ref": "BackendBucketCdnPolicyBypassCacheOnRequestHeader" + }, + "type": "array" + }, "cacheMode": { "description": "Specifies the cache setting for all responses from this backend. The possible values are:\n\nUSE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server.\n\nFORCE_CACHE_ALL Cache all content, ignoring any \"private\", \"no-store\" or \"no-cache\" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content.\n\nCACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached.", "enum": [ @@ -28276,7 +29239,7 @@ "type": "string" }, "clientTtl": { - "description": "Specifies a separate client (e.g. browser client) TTL, separate from the TTL for Cloud CDN's edge caches. Leaving this empty will use the same cache TTL for both Cloud CDN and the client-facing response. The maximum allowed value is 86400s (1 day).", + "description": "Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a \"public\" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a \"public\" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 86400s (1 day).", "format": "int32", "type": "integer" }, @@ -28290,6 +29253,26 @@ "format": "int32", "type": "integer" }, + "negativeCaching": { + "description": "Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. This can reduce the load on your origin and improve end-user experience by reducing response latency. When the cache mode is set to CACHE_ALL_STATIC or USE_ORIGIN_HEADERS, negative caching applies to responses with the specified response code that lack any Cache-Control, Expires, or Pragma: no-cache directives. When the cache mode is set to FORCE_CACHE_ALL, negative caching applies to all responses with the specified response code, and override any caching headers. By default, Cloud CDN will apply the following default TTLs to these status codes: HTTP 300 (Multiple Choice), 301, 308 (Permanent Redirects): 10m HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s HTTP 405 (Method Not Found), 421 (Misdirected Request), 501 (Not Implemented): 60s. These defaults can be overridden in negative_caching_policy.", + "type": "boolean" + }, + "negativeCachingPolicy": { + "description": "Sets a cache TTL for the specified HTTP status code. negative_caching must be enabled to configure negative_caching_policy. Omitting the policy and leaving negative_caching enabled will use Cloud CDN's default cache TTLs. 
Note that when specifying an explicit negative_caching_policy, you should take care to specify a cache TTL for all response codes that you wish to cache. Cloud CDN will not apply any default negative caching when a policy exists.", + "items": { + "$ref": "BackendBucketCdnPolicyNegativeCachingPolicy" + }, + "type": "array" + }, + "requestCoalescing": { + "description": "If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin.", + "type": "boolean" + }, + "serveWhileStale": { + "description": "Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. This setting defines the default \"max-stale\" duration for any cached responses that do not specify a max-stale directive. Stale responses that exceed the TTL configured here will not be served. The default limit (max-stale) is 86400s (1 day), which will allow stale content to be served up to this limit beyond the max-age (or s-max-age) of a cached response. The maximum allowed value is 604800 (1 week). Set this to zero (0) to disable serve-while-stale.", + "format": "int32", + "type": "integer" + }, "signedUrlCacheMaxAgeSec": { "description": "Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s). When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a \"Cache-Control: public, max-age=[TTL]\" header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered.", "format": "int64", @@ -28305,6 +29288,34 @@ }, "type": "object" }, + "BackendBucketCdnPolicyBypassCacheOnRequestHeader": { + "description": "Bypass the cache when the specified request headers are present, e.g. Pragma or Authorization headers. Values are case insensitive. The presence of such a header overrides the cache_mode setting.", + "id": "BackendBucketCdnPolicyBypassCacheOnRequestHeader", + "properties": { + "headerName": { + "description": "The header field name to match on when bypassing cache. Values are case-insensitive.", + "type": "string" + } + }, + "type": "object" + }, + "BackendBucketCdnPolicyNegativeCachingPolicy": { + "description": "Specify CDN TTLs for response error codes.", + "id": "BackendBucketCdnPolicyNegativeCachingPolicy", + "properties": { + "code": { + "description": "The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 302, 307, 308, 404, 405, 410, 421, 451 and 501 can be specified as values, and you cannot specify a status code more than once.", + "format": "int32", + "type": "integer" + }, + "ttl": { + "description": "The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "BackendBucketList": { "description": "Contains a list of BackendBucket resources.", "id": "BackendBucketList", @@ -28439,7 +29450,7 @@ }, "cdnPolicy": { "$ref": "BackendServiceCdnPolicy", - "description": "Cloud CDN configuration for this BackendService. Not available for Internal TCP/UDP Load Balancing and Network Load Balancing."
+ "description": "Cloud CDN configuration for this BackendService. Only available for external HTTP(S) Load Balancing." }, "circuitBreakers": { "$ref": "CircuitBreakers", @@ -28552,6 +29563,10 @@ "$ref": "BackendServiceLogConfig", "description": "This field denotes the logging options for the load balancer traffic served by this backend service. If logging is enabled, logs will be exported to Stackdriver." }, + "maxStreamDuration": { + "$ref": "Duration", + "description": "Specifies the default maximum duration (timeout) for streams to this service. Duration is computed from the beginning of the stream until the response has been completely processed, including all retries. A stream that does not complete in this duration is closed.\nIf not specified, there will be no timeout limit, i.e. the maximum duration is infinite.\nThis field is only allowed when the loadBalancingScheme of the backend service is INTERNAL_SELF_MANAGED." + }, "name": { "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", @@ -28566,7 +29581,7 @@ "description": "Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. If not set, this feature is considered disabled.\n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. \n\nNot supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true." }, "port": { - "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80.\n\nThis cannot be used if the loadBalancingScheme is INTERNAL (Internal TCP/UDP Load Balancing).", + "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80.\n\nBackend services for Internal TCP/UDP Load Balancing and Network Load Balancing require you omit port.", "format": "int32", "type": "integer" }, @@ -28613,9 +29628,10 @@ "type": "string" }, "sessionAffinity": { - "description": "Type of session affinity to use. The default is NONE.\n\nWhen the loadBalancingScheme is EXTERNAL: * For Network Load Balancing, the possible values are NONE, CLIENT_IP, CLIENT_IP_PROTO, or CLIENT_IP_PORT_PROTO. * For all other load balancers that use loadBalancingScheme=EXTERNAL, the possible values are NONE, CLIENT_IP, or GENERATED_COOKIE. 
* You can use GENERATED_COOKIE if the protocol is HTTP, HTTP2, or HTTPS.\n\nWhen the loadBalancingScheme is INTERNAL, possible values are NONE, CLIENT_IP, CLIENT_IP_PROTO, or CLIENT_IP_PORT_PROTO.\n\nWhen the loadBalancingScheme is INTERNAL_SELF_MANAGED, or INTERNAL_MANAGED, possible values are NONE, CLIENT_IP, GENERATED_COOKIE, HEADER_FIELD, or HTTP_COOKIE.\n\nNot supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", + "description": "Type of session affinity to use. The default is NONE.\n\nWhen the loadBalancingScheme is EXTERNAL: * For Network Load Balancing, the possible values are NONE, CLIENT_IP, CLIENT_IP_PROTO, or CLIENT_IP_PORT_PROTO. * For all other load balancers that use loadBalancingScheme=EXTERNAL, the possible values are NONE, CLIENT_IP, or GENERATED_COOKIE. * You can use GENERATED_COOKIE if the protocol is HTTP, HTTP2, or HTTPS.\n\nWhen the loadBalancingScheme is INTERNAL, possible values are NONE, CLIENT_IP, CLIENT_IP_PROTO, or CLIENT_IP_PORT_PROTO.\n\nWhen the loadBalancingScheme is INTERNAL_SELF_MANAGED, or INTERNAL_MANAGED, possible values are NONE, CLIENT_IP, GENERATED_COOKIE, HEADER_FIELD, or HTTP_COOKIE.\n\nNot supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "enum": [ "CLIENT_IP", + "CLIENT_IP_NO_DESTINATION", "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", "GENERATED_COOKIE", @@ -28630,6 +29646,7 @@ "", "", "", + "", "" ], "type": "string" @@ -28770,6 +29787,13 @@ "description": "Message containing Cloud CDN configuration for a backend service.", "id": "BackendServiceCdnPolicy", "properties": { + "bypassCacheOnRequestHeaders": { + "description": "Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings.", + "items": { + "$ref": "BackendServiceCdnPolicyBypassCacheOnRequestHeader" + }, + "type": "array" + }, "cacheKeyPolicy": { "$ref": "CacheKeyPolicy", "description": "The CacheKeyPolicy for this CdnPolicy." @@ -28791,7 +29815,7 @@ "type": "string" }, "clientTtl": { - "description": "Specifies a separate client (e.g. browser client) TTL, separate from the TTL for Cloud CDN's edge caches. Leaving this empty will use the same cache TTL for both Cloud CDN and the client-facing response. The maximum allowed value is 86400s (1 day).", + "description": "Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a \"public\" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a \"public\" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 86400s (1 day).", "format": "int32", "type": "integer" }, @@ -28805,6 +29829,26 @@ "format": "int32", "type": "integer" }, + "negativeCaching": { + "description": "Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. 
This can reduce the load on your origin and improve end-user experience by reducing response latency. When the cache mode is set to CACHE_ALL_STATIC or USE_ORIGIN_HEADERS, negative caching applies to responses with the specified response code that lack any Cache-Control, Expires, or Pragma: no-cache directives. When the cache mode is set to FORCE_CACHE_ALL, negative caching applies to all responses with the specified response code, and override any caching headers. By default, Cloud CDN will apply the following default TTLs to these status codes: HTTP 300 (Multiple Choice), 301, 308 (Permanent Redirects): 10m HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s HTTP 405 (Method Not Found), 421 (Misdirected Request), 501 (Not Implemented): 60s. These defaults can be overridden in negative_caching_policy.", + "type": "boolean" + }, + "negativeCachingPolicy": { + "description": "Sets a cache TTL for the specified HTTP status code. negative_caching must be enabled to configure negative_caching_policy. Omitting the policy and leaving negative_caching enabled will use Cloud CDN's default cache TTLs. Note that when specifying an explicit negative_caching_policy, you should take care to specify a cache TTL for all response codes that you wish to cache. Cloud CDN will not apply any default negative caching when a policy exists.", + "items": { + "$ref": "BackendServiceCdnPolicyNegativeCachingPolicy" + }, + "type": "array" + }, + "requestCoalescing": { + "description": "If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin.", + "type": "boolean" + }, + "serveWhileStale": { + "description": "Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. This setting defines the default \"max-stale\" duration for any cached responses that do not specify a max-stale directive. Stale responses that exceed the TTL configured here will not be served. The default limit (max-stale) is 86400s (1 day), which will allow stale content to be served up to this limit beyond the max-age (or s-max-age) of a cached response. The maximum allowed value is 604800 (1 week). Set this to zero (0) to disable serve-while-stale.", + "format": "int32", + "type": "integer" + }, "signedUrlCacheMaxAgeSec": { "description": "Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s). When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a \"Cache-Control: public, max-age=[TTL]\" header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered.", "format": "int64", @@ -28820,6 +29864,34 @@ }, "type": "object" }, + "BackendServiceCdnPolicyBypassCacheOnRequestHeader": { + "description": "Bypass the cache when the specified request headers are present, e.g. Pragma or Authorization headers. Values are case insensitive. The presence of such a header overrides the cache_mode setting.", + "id": "BackendServiceCdnPolicyBypassCacheOnRequestHeader", + "properties": { + "headerName": { + "description": "The header field name to match on when bypassing cache. 
Values are case-insensitive.", + "type": "string" + } + }, + "type": "object" + }, + "BackendServiceCdnPolicyNegativeCachingPolicy": { + "description": "Specify CDN TTLs for response error codes.", + "id": "BackendServiceCdnPolicyNegativeCachingPolicy", + "properties": { + "code": { + "description": "The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 302, 307, 308, 404, 405, 410, 421, 451 and 501 can be specified as values, and you cannot specify a status code more than once.", + "format": "int32", + "type": "integer" + }, + "ttl": { + "description": "The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "BackendServiceFailoverPolicy": { "description": "Applicable only to Failover for Internal TCP/UDP Load Balancing and Network Load Balancing. On failover or failback, this field indicates whether connection draining will be honored. GCP has a fixed connection draining timeout of 10 minutes. A setting of true terminates existing TCP connections to the active pool during failover and failback, immediately draining traffic. A setting of false allows existing TCP connections to persist, even on VMs no longer in the active pool, for up to the duration of the connection draining timeout (10 minutes).", "id": "BackendServiceFailoverPolicy", @@ -29153,6 +30225,56 @@ }, "type": "object" }, + "BulkInsertInstanceResource": { + "id": "BulkInsertInstanceResource", + "properties": { + "count": { + "description": "The maximum number of instances to create.", + "format": "int64", + "type": "string" + }, + "instanceProperties": { + "$ref": "InstanceProperties", + "description": "The instance properties defining the VM instances to be created. Required if sourceInstanceTemplate is not provided." + }, + "locationPolicy": { + "$ref": "LocationPolicy", + "description": "Policy for choosing target zone." + }, + "minCount": { + "description": "The minimum number of instances to create. If no min_count is specified then count is used as the default value. If min_count instances cannot be created, then no instances will be created and instances already created will be deleted.", + "format": "int64", + "type": "string" + }, + "namePattern": { + "description": "The string pattern used for the names of the VMs. Either name_pattern or per_instance_properties must be set. The pattern should contain one continuous sequence of placeholder hash characters (#) with each character corresponding to one digit of the generated instance name. Example: name_pattern of inst-#### will generate instance names such as inst-0001, inst-0002, ... . If there already exist instance(s) whose names match the name pattern in the same project and zone, then the generated instance numbers will start after the biggest existing number. For example, if there exists an instance with name inst-0050, then instance names generated using the pattern inst-#### will be inst-0051, inst-0052, etc. The name pattern placeholder #...# can contain up to 18 characters.", + "type": "string" + }, + "perInstanceProperties": { + "additionalProperties": { + "$ref": "BulkInsertInstanceResourcePerInstanceProperties" + }, + "description": "Per-instance properties to be set on individual instances. Keys of this map specify requested instance names.
Can be empty if name_pattern is used.", + "type": "object" + }, + "sourceInstanceTemplate": { + "description": "Specifies the instance template from which to create instances. You may combine sourceInstanceTemplate with instanceProperties to override specific values from an existing instance template. Bulk API follows the semantics of JSON Merge Patch described by RFC 7396.\n\nIt can be a full or partial URL. For example, the following are all valid URLs to an instance template: \n- https://www.googleapis.com/compute/v1/projects/project/global/instanceTemplates/instanceTemplate \n- projects/project/global/instanceTemplates/instanceTemplate \n- global/instanceTemplates/instanceTemplate \n\nThis field is optional.", + "type": "string" + } + }, + "type": "object" + }, + "BulkInsertInstanceResourcePerInstanceProperties": { + "description": "Per-instance properties to be set on individual instances. To be extended in the future.", + "id": "BulkInsertInstanceResourcePerInstanceProperties", + "properties": { + "name": { + "description": "This field is only temporary. It will be removed. Do not use it.", + "type": "string" + } + }, + "type": "object" + }, "CacheInvalidationRule": { "id": "CacheInvalidationRule", "properties": { @@ -30043,6 +31165,11 @@ "format": "int64", "type": "string" }, + "provisionedIops": { + "description": "Indicates how many IOPS must be provisioned for the disk.", + "format": "int64", + "type": "string" + }, "region": { "description": "[Output Only] URL of the region where the disk resides. Only applicable for regional resources. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", "type": "string" @@ -30075,7 +31202,7 @@ "type": "string" }, "sourceDisk": { - "description": "The source disk used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk \n- projects/project/zones/zone/disks/disk \n- zones/zone/disks/disk", + "description": "The source disk used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk \n- https://www.googleapis.com/compute/v1/projects/project/regions/region/disks/disk \n- projects/project/zones/zone/disks/disk \n- projects/project/regions/region/disks/disk \n- zones/zone/disks/disk \n- regions/region/disks/disk", "type": "string" }, "sourceDiskId": { @@ -30981,6 +32108,20 @@ "DistributionPolicy": { "id": "DistributionPolicy", "properties": { + "targetShape": { + "description": "The distribution shape to which the group converges either proactively or on resize events (depending on the value set in updatePolicy.instanceRedistributionType).", + "enum": [ + "ANY", + "BALANCED", + "EVEN" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, "zones": { "description": "Zones where the regional managed instance group will create and manage its instances.", "items": { @@ -31758,7 +32899,7 @@ "type": "string" }, "displayName": { - "description": "User-provided name of the Organization firewall plicy. The name should be unique in the organization in which the firewall policy is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "description": "Deprecated, please use short name instead. User-provided name of the Organization firewall policy. The name should be unique in the organization in which the firewall policy is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, @@ -31804,6 +32945,11 @@ "selfLinkWithId": { "description": "[Output Only] Server-defined URL for this resource with the resource id.", "type": "string" + }, + "shortName": { + "description": "User-provided name of the Organization firewall policy. The name should be unique in the organization in which the firewall policy is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + } }, "type": "object" @@ -31816,7 +32962,7 @@ "type": "string" }, "displayName": { - "description": "[Output Only] The display name of the firewall policy of the association.", + "description": "[Output Only] Deprecated, please use short name instead. The display name of the firewall policy of the association.", "type": "string" }, "firewallPolicyId": { @@ -31826,6 +32972,10 @@ "name": { "description": "The name for an association.", "type": "string" + }, + "shortName": { + "description": "[Output Only] The short name of the firewall policy of the association.", + "type": "string" + } }, "type": "object" @@ -31950,7 +33100,7 @@ "type": "string" }, "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", + "description": "An optional description for this resource.", "type": "string" }, "direction": { @@ -32095,7 +33245,7 @@ "id": "ForwardingRule", "properties": { "IPAddress": { - "description": "IP address that this forwarding rule serves. When a client sends traffic to this IP address, the forwarding rule directs the traffic to the target that you specify in the forwarding rule.\n\nIf you don't specify a reserved IP address, an ephemeral IP address is assigned. Methods for specifying an IP address:\n\n* IPv4 dotted decimal, as in `100.1.2.3` * Full URL, as in https://www.googleapis.com/compute/v1/projects/project_id/regions/region/addresses/address-name * Partial URL or by name, as in: \n- projects/project_id/regions/region/addresses/address-name \n- regions/region/addresses/address-name \n- global/addresses/address-name \n- address-name \n\nThe loadBalancingScheme and the forwarding rule's target determine the type of IP address that you can use.
For detailed information, refer to [IP address specifications](/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications).\n\nMust be set to `0.0.0.0` when the target is targetGrpcProxy that has validateForProxyless field set to true.", + "description": "IP address that this forwarding rule serves. When a client sends traffic to this IP address, the forwarding rule directs the traffic to the target that you specify in the forwarding rule.\n\nIf you don't specify a reserved IP address, an ephemeral IP address is assigned. Methods for specifying an IP address:\n\n* IPv4 dotted decimal, as in `100.1.2.3` * Full URL, as in https://www.googleapis.com/compute/v1/projects/project_id/regions/region/addresses/address-name * Partial URL or by name, as in: \n- projects/project_id/regions/region/addresses/address-name \n- regions/region/addresses/address-name \n- global/addresses/address-name \n- address-name \n\nThe loadBalancingScheme and the forwarding rule's target determine the type of IP address that you can use. For detailed information, refer to [IP address specifications](/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications).\n\nMust be set to `0.0.0.0` when the target is targetGrpcProxy that has validateForProxyless field set to true.\n\nFor Private Service Connect forwarding rules that forward traffic to Google APIs, IP address must be provided.", "type": "string" }, "IPProtocol": { @@ -32214,7 +33364,7 @@ "type": "string" }, "network": { - "description": "This field is not used for external load balancing.\n\nFor Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If this field is not specified, the default network will be used.", + "description": "This field is not used for external load balancing.\n\nFor Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If this field is not specified, the default network will be used.\n\nFor Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", "type": "string" }, "networkTier": { @@ -32240,6 +33390,11 @@ }, "type": "array" }, + "pscConnectionId": { + "description": "[Output Only] The PSC connection id of the PSC Forwarding Rule.", + "format": "uint64", + "type": "string" + }, "region": { "description": "[Output Only] URL of the region where the regional forwarding rule resides. This field is not applicable to global forwarding rules. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", "type": "string" @@ -32248,6 +33403,13 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "serviceDirectoryRegistrations": { + "description": "Service Directory resources to register this forwarding rule with. Currently, only supports a single Service Directory resource.\n\nIt is only supported for Internal TCP/UDP Load Balancing and Internal HTTP(S) Load Balancing.", + "items": { + "$ref": "ForwardingRuleServiceDirectoryRegistration" + }, + "type": "array" + }, "serviceLabel": { "description": "An optional prefix to the service name for this Forwarding Rule. If specified, the prefix is the first label of the fully qualified service name.\n\nThe label must be 1-63 characters long, and comply with RFC1035. 
Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.\n\nThis field is only used for internal load balancing.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", @@ -32262,7 +33424,6 @@ "type": "string" }, "target": { - "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must be in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. For more information, see the \"Target\" column in [Port specifications](/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications).", "type": "string" } }, @@ -32516,6 +33677,25 @@ }, "type": "object" }, + "ForwardingRuleServiceDirectoryRegistration": { + "description": "Describes the auto-registration of the Forwarding Rule to Service Directory. The region and project of the Service Directory resource generated from this registration will be the same as this Forwarding Rule.", + "id": "ForwardingRuleServiceDirectoryRegistration", + "properties": { + "namespace": { + "description": "Service Directory namespace to register the forwarding rule under.", + "type": "string" + }, + "service": { + "description": "Service Directory service to register the forwarding rule under.", + "type": "string" + }, + "serviceDirectoryRegion": { + "description": "[Optional] Service Directory region to register this global forwarding rule under. Default to \"us-central1\". Only used for PSC for Google APIs. All PSC for Google APIs Forwarding Rules on the same network should use the same Service Directory region.", + "type": "string" + } + }, + "type": "object" + }, "ForwardingRulesScopedList": { "id": "ForwardingRulesScopedList", "properties": { @@ -33038,7 +34218,7 @@ "description": "Configure logging on this health check." }, "name": { - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. For example, a name that is 1-63 characters long, matches the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`, and otherwise complies with RFC1035. 
This regular expression describes a name where the first character is a lowercase letter, and all following characters are a dash, lowercase letter, or digit, except the last character, which isn't a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, @@ -33660,6 +34840,14 @@ "description": "Metadata defined as annotations for network endpoint.", "type": "object" }, + "forwardingRule": { + "description": "URL of the forwarding rule associated with the health status of the instance.", + "type": "string" + }, + "forwardingRuleIp": { + "description": "A forwarding rule IP address assigned to this instance.", + "type": "string" + }, "healthState": { "description": "Health state of the instance.", "enum": [ @@ -33677,7 +34865,7 @@ "type": "string" }, "ipAddress": { - "description": "A forwarding rule IP address assigned to this instance.", + "description": "For target pool based Network Load Balancing, it indicates the forwarding rule's IP address assigned to this instance. For other types of load balancing, the field indicates VM internal ip.", "type": "string" }, "port": { @@ -34192,6 +35380,10 @@ "$ref": "HttpFaultInjection", "description": "The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by Loadbalancer on a percentage of requests before sending those request to the backend service. Similarly requests from clients can be aborted by the Loadbalancer for a percentage of requests.\ntimeout and retry_policy will be ignored by clients that are configured with a fault_injection_policy.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." }, + "maxStreamDuration": { + "$ref": "Duration", + "description": "Specifies the maximum duration (timeout) for streams on the selected route. Unlike the timeout field where the timeout duration starts from the time the request has been fully processed (i.e. end-of-stream), the duration in this field is computed from the beginning of the stream until the response has been completely processed, including all retries. A stream that does not complete in this duration is closed.\nIf not specified, will use the largest maxStreamDuration among all backend services associated with the route.\nThis field is only allowed if the Url map is used with backend services with loadBalancingScheme set to INTERNAL_SELF_MANAGED." + }, "requestMirrorPolicy": { "$ref": "RequestMirrorPolicy", "description": "Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. Loadbalancer does not wait for responses from the shadow service. Prior to sending traffic to the shadow service, the host / authority header is suffixed with -shadow.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." 
@@ -34602,6 +35794,10 @@ }, "type": "object" }, + "satisfiesPzs": { + "description": "[Output Only] Reserved for future use.", + "type": "boolean" + }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" @@ -34957,6 +36153,20 @@ }, "type": "array" }, + "postKeyRevocationActionType": { + "description": "PostKeyRevocationActionType of the instance.", + "enum": [ + "NOOP", + "POST_KEY_REVOCATION_ACTION_TYPE_UNSPECIFIED", + "SHUTDOWN" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, "privateIpv6GoogleAccess": { "description": "The private IPv6 google access type for the VM. If not specified, use INHERIT_FROM_SUBNETWORK as default.", "enum": [ @@ -37129,6 +38339,20 @@ }, "type": "array" }, + "postKeyRevocationActionType": { + "description": "PostKeyRevocationActionType of the instance.", + "enum": [ + "NOOP", + "POST_KEY_REVOCATION_ACTION_TYPE_UNSPECIFIED", + "SHUTDOWN" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, "privateIpv6GoogleAccess": { "description": "The private IPv6 google access type for VMs. If not specified, use INHERIT_FROM_SUBNETWORK as default.", "enum": [ @@ -37410,6 +38634,63 @@ }, "type": "object" }, + "InstancesGetEffectiveFirewallsResponse": { + "id": "InstancesGetEffectiveFirewallsResponse", + "properties": { + "firewallPolicys": { + "description": "Effective firewalls from firewall policies.", + "items": { + "$ref": "InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy" + }, + "type": "array" + }, + "firewalls": { + "description": "Effective firewalls on the instance.", + "items": { + "$ref": "Firewall" + }, + "type": "array" + } + }, + "type": "object" + }, + "InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy": { + "id": "InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy", + "properties": { + "displayName": { + "description": "[Output Only] Deprecated, please use short name instead. The display name of the firewall policy.", + "type": "string" + }, + "name": { + "description": "[Output Only] The name of the firewall policy.", + "type": "string" + }, + "rules": { + "description": "The rules that apply to the network.", + "items": { + "$ref": "FirewallPolicyRule" + }, + "type": "array" + }, + "shortName": { + "description": "[Output Only] The short name of the firewall policy.", + "type": "string" + }, + "type": { + "description": "[Output Only] The type of the firewall policy.", + "enum": [ + "HIERARCHY", + "UNSPECIFIED" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "InstancesRemoveResourcePoliciesRequest": { "id": "InstancesRemoveResourcePoliciesRequest", "properties": { @@ -37848,6 +39129,18 @@ ], "type": "string" }, + "encryption": { + "description": "Indicates the user-supplied encryption option of this interconnect attachment: \n- NONE is the default value, which means that the attachment carries unencrypted traffic. VMs can send traffic to, or receive traffic from, this type of attachment. \n- IPSEC indicates that the attachment carries only traffic encrypted by an IPsec device such as an HA VPN gateway. VMs cannot directly send traffic to, or receive traffic from, such an attachment. To use IPsec-encrypted Cloud Interconnect, create the attachment using this option. 
\nNot currently available in all Interconnect locations.", + "enum": [ + "IPSEC", + "NONE" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, "googleReferenceId": { "description": "[Output Only] Google reference ID, to be used when raising support tickets with Google or otherwise to debug backend connectivity issues. [Deprecated] This field is not used.", "type": "string" @@ -37861,6 +39154,13 @@ "description": "URL of the underlying Interconnect object that this attachment's traffic will traverse through.", "type": "string" }, + "ipsecInternalAddresses": { + "description": "URL of addresses that have been reserved for the interconnect attachment, Used only for interconnect attachment that has the encryption option as IPSEC. The addresses must be RFC 1918 IP address ranges. When creating HA VPN gateway over the interconnect attachment, if the attachment is configured to use an RFC 1918 IP address, then the VPN gateway's IP address will be allocated from the IP address range specified here. For example, if the HA VPN gateway's interface 0 is paired to this interconnect attachment, then an RFC 1918 IP address for the VPN gateway interface 0 will be allocated from the IP address specified for this interconnect attachment. If this field is not specified for interconnect attachment that has encryption option as IPSEC, later on when creating HA VPN gateway on this interconnect attachment, the HA VPN gateway's IP address will be allocated from regional external IP address pool.\nNot currently available in all Interconnect locations.", + "items": { + "type": "string" + }, + "type": "array" + }, "kind": { "default": "compute#interconnectAttachment", "description": "[Output Only] Type of the resource. Always compute#interconnectAttachment for interconnect attachments.", "type": "string" @@ -39234,6 +40534,40 @@ }, "type": "object" }, + "LocationPolicy": { + "description": "Configuration for location policy among multiple possible locations (e.g. preferences for zone selection among zones in a single region).", + "id": "LocationPolicy", + "properties": { + "locations": { + "additionalProperties": { + "$ref": "LocationPolicyLocation" + }, + "description": "Location configurations mapped by location name. Currently only zone names are supported and must be represented as valid internal URLs, such as zones/us-central1-a.", + "type": "object" + } + }, + "type": "object" + }, + "LocationPolicyLocation": { + "id": "LocationPolicyLocation", + "properties": { + "preference": { + "description": "Preference for a given location: ALLOW or DENY.", + "enum": [ + "ALLOW", + "DENY", + "PREFERENCE_UNSPECIFIED" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "LogConfig": { "description": "Specifies what kind of log the caller must write", "id": "LogConfig", @@ -40212,6 +41546,7 @@ "networkEndpointType": { "description": "Type of network endpoints in this network endpoint group. Can be one of GCE_VM_IP_PORT, NON_GCP_PRIVATE_IP_PORT, INTERNET_FQDN_PORT, INTERNET_IP_PORT, or SERVERLESS.", "enum": [ + "GCE_VM_IP", "GCE_VM_IP_PORT", "INTERNET_FQDN_PORT", "INTERNET_IP_PORT", @@ -40223,6 +41558,7 @@ "", "", "", + "", "" ], "type": "string" @@ -40831,7 +42167,7 @@ "type": "array" }, "fingerprint": { - "description": "Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface.
An up-to-date fingerprint must be provided in order to update the NetworkInterface, otherwise the request will fail with error 412 conditionNotMet.", + "description": "Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface. The request will fail with error 400 Bad Request if the fingerprint is not provided, or 412 Precondition Failed if the fingerprint is out of date.", "format": "byte", "type": "string" }, @@ -41099,6 +42435,65 @@ }, "type": "object" }, + "NetworksGetEffectiveFirewallsResponse": { + "id": "NetworksGetEffectiveFirewallsResponse", + "properties": { + "firewallPolicys": { + "description": "Effective firewalls from firewall policy.", + "items": { + "$ref": "NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy" + }, + "type": "array" + }, + "firewalls": { + "description": "Effective firewalls on the network.", + "items": { + "$ref": "Firewall" + }, + "type": "array" + } + }, + "type": "object" + }, + "NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy": { + "id": "NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy", + "properties": { + "displayName": { + "description": "[Output Only] Deprecated, please use short name instead. The display name of the firewall policy.", + "type": "string" + }, + "name": { + "description": "[Output Only] The name of the firewall policy.", + "type": "string" + }, + "rules": { + "description": "The rules that apply to the network.", + "items": { + "$ref": "FirewallPolicyRule" + }, + "type": "array" + }, + "shortName": { + "description": "[Output Only] The short name of the firewall policy.", + "type": "string" + }, + "type": { + "description": "[Output Only] The type of the firewall policy.", + "enum": [ + "HIERARCHY", + "NETWORK", + "UNSPECIFIED" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "NetworksRemovePeeringRequest": { "id": "NetworksRemovePeeringRequest", "properties": { @@ -41148,6 +42543,10 @@ "description": "[Output Only] The type of the resource. Always compute#nodeGroup for node group.", "type": "string" }, + "locationHint": { + "description": "An opaque location hint used to place the Node close to other resources. This field is for use by internal tools that use the public API. The location hint here on the NodeGroup overrides any location_hint present in the NodeTemplate.", + "type": "string" + }, "maintenancePolicy": { "description": "Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT. For more information, see Maintenance policies.", "enum": [ @@ -41538,6 +42937,10 @@ "description": "The type of this node.", "type": "string" }, + "satisfiesPzs": { + "description": "[Output Only] Reserved for future use.", + "type": "boolean" + }, "serverBinding": { "$ref": "ServerBinding", "description": "Binding properties for the physical server." 
@@ -42924,6 +44327,10 @@ "description": "[Output Only] Name of the operation.", "type": "string" }, + "operationGroupId": { + "description": "[Output Only] An ID that represents a group of operations, such as when a group of operations results from a `bulkInsert` API request.", + "type": "string" + }, "operationType": { "description": "[Output Only] The type of operation, such as `insert`, `update`, or `delete`, and so on.", "type": "string" @@ -43078,144 +44485,1411 @@ "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "type": "string" }, - "items": { - "additionalProperties": { - "$ref": "OperationsScopedList", - "description": "[Output Only] Name of the scope containing this set of operations." - }, - "description": "[Output Only] A map of scoped operation lists.", - "type": "object" + "items": { + "additionalProperties": { + "$ref": "OperationsScopedList", + "description": "[Output Only] Name of the scope containing this set of operations." + }, + "description": "[Output Only] A map of scoped operation lists.", + "type": "object" + }, + "kind": { + "default": "compute#operationAggregatedList", + "description": "[Output Only] Type of resource. Always `compute#operationAggregatedList` for aggregated lists of operations.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than `maxResults`, use the `nextPageToken` as a value for the query parameter `pageToken` in the next list request. Subsequent list requests will have their own `nextPageToken` to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "OperationList": { + "description": "Contains a list of Operation resources.", + "id": "OperationList", + "properties": { + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "type": "string" + }, + "items": { + "description": "[Output Only] A list of Operation resources.", + "items": { + "$ref": "Operation" + }, + "type": "array" + }, + "kind": { + "default": "compute#operationList", + "description": "[Output Only] Type of resource. Always `compute#operations` for Operations resource.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than `maxResults`, use the `nextPageToken` as a value for the query parameter `pageToken` in the next list request. Subsequent list requests will have their own `nextPageToken` to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "OperationsScopedList": { + "id": "OperationsScopedList", + "properties": { + "operations": { + "description": "[Output Only] A list of operations contained in this scope.", + "items": { + "$ref": "Operation" + }, + "type": "array" + }, + "warning": { + "description": "[Output Only] Informational warning which replaces the list of operations when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "OutlierDetection": { + "description": "Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service.", + "id": "OutlierDetection", + "properties": { + "baseEjectionTime": { + "$ref": "Duration", + "description": "The base time that a host is ejected for. The real ejection time is equal to the base ejection time multiplied by the number of times the host has been ejected. Defaults to 30000ms or 30s." + }, + "consecutiveErrors": { + "description": "Number of errors before a host is ejected from the connection pool. When the backend host is accessed over HTTP, a 5xx return code qualifies as an error. Defaults to 5.", + "format": "int32", + "type": "integer" + }, + "consecutiveGatewayFailure": { + "description": "The number of consecutive gateway failures (502, 503, 504 status or connection errors that are mapped to one of those status codes) before a consecutive gateway failure ejection occurs. Defaults to 3.", + "format": "int32", + "type": "integer" + }, + "enforcingConsecutiveErrors": { + "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive 5xx. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 0.", + "format": "int32", + "type": "integer" + }, + "enforcingConsecutiveGatewayFailure": { + "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive gateway failures. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.", + "format": "int32", + "type": "integer" + }, + "enforcingSuccessRate": { + "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through success rate statistics. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.", + "format": "int32", + "type": "integer" + }, + "interval": { + "$ref": "Duration", + "description": "Time interval between ejection analysis sweeps. This can result in both new ejections as well as hosts being returned to service. Defaults to 1 second." + }, + "maxEjectionPercent": { + "description": "Maximum percentage of hosts in the load balancing pool for the backend service that can be ejected. Defaults to 50%.", + "format": "int32", + "type": "integer" + }, + "successRateMinimumHosts": { + "description": "The number of hosts in a cluster that must have enough request volume to detect success rate outliers. If the number of hosts is less than this setting, outlier detection via success rate statistics is not performed for any host in the cluster. 
Defaults to 5.", + "format": "int32", + "type": "integer" + }, + "successRateRequestVolume": { + "description": "The minimum number of total requests that must be collected in one interval (as defined by the interval duration above) to include this host in success rate based outlier detection. If the volume is lower than this setting, outlier detection via success rate statistics is not performed for that host. Defaults to 100.", + "format": "int32", + "type": "integer" + }, + "successRateStdevFactor": { + "description": "This factor is used to determine the ejection threshold for success rate outlier ejection. The ejection threshold is the difference between the mean success rate, and the product of this factor and the standard deviation of the mean success rate: mean - (stdev * success_rate_stdev_factor). This factor is divided by a thousand to get a double. That is, if the desired factor is 1.9, the runtime value should be 1900. Defaults to 1900.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "PacketMirroring": { + "description": "Represents a Packet Mirroring resource.\n\nPacket Mirroring clones the traffic of specified instances in your Virtual Private Cloud (VPC) network and forwards it to a collector destination, such as an instance group of an internal TCP/UDP load balancer, for analysis or examination. For more information about setting up Packet Mirroring, see Using Packet Mirroring. (== resource_for {$api_version}.packetMirrorings ==)", + "id": "PacketMirroring", + "properties": { + "collectorIlb": { + "$ref": "PacketMirroringForwardingRuleInfo", + "description": "The Forwarding Rule resource of type loadBalancingScheme=INTERNAL that will be used as collector for mirrored traffic. The specified forwarding rule must have isMirroringCollector set to true." + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "enable": { + "description": "Indicates whether or not this packet mirroring takes effect. If set to FALSE, this packet mirroring policy will not be enforced on the network.\n\nThe default is TRUE.", + "enum": [ + "FALSE", + "TRUE" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "filter": { + "$ref": "PacketMirroringFilter", + "description": "Filter for mirrored traffic. If unspecified, all traffic is mirrored." + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#packetMirroring", + "description": "[Output Only] Type of the resource. Always compute#packetMirroring for packet mirrorings.", + "type": "string" + }, + "mirroredResources": { + "$ref": "PacketMirroringMirroredResourceInfo", + "description": "PacketMirroring mirroredResourceInfos. MirroredResourceInfo specifies a set of mirrored VM instances, subnetworks and/or tags for which traffic from/to all VM instances will be mirrored." + }, + "name": { + "annotations": { + "required": [ + "compute.packetMirrorings.insert" + ] + }, + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "network": { + "$ref": "PacketMirroringNetworkInfo", + "annotations": { + "required": [ + "compute.packetMirrorings.insert" + ] + }, + "description": "Specifies the mirrored VPC network. Only packets in this network will be mirrored. All mirrored VMs should have a NIC in the given network. All mirrored subnetworks should belong to the given network." + }, + "priority": { + "description": "The priority of applying this configuration. Priority is used to break ties in cases where there is more than one matching rule. In the case of two rules that apply for a given Instance, the one with the lowest-numbered priority value wins.\n\nDefault value is 1000. Valid range is 0 through 65535.", + "format": "uint32", + "type": "integer" + }, + "region": { + "description": "[Output Only] URI of the region where the packetMirroring resides.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + } + }, + "type": "object" + }, + "PacketMirroringAggregatedList": { + "description": "Contains a list of packetMirrorings.", + "id": "PacketMirroringAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "PacketMirroringsScopedList", + "description": "Name of the scope containing this set of packetMirrorings." + }, + "description": "A list of PacketMirroring resources.", + "type": "object" + }, + "kind": { + "default": "compute#packetMirroringAggregatedList", + "description": "Type of resource.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "PacketMirroringFilter": { + "id": "PacketMirroringFilter", + "properties": { + "IPProtocols": { + "description": "Protocols that apply as filter on mirrored traffic. If no protocols are specified, all traffic that matches the specified CIDR ranges is mirrored. If neither cidrRanges nor IPProtocols is specified, all traffic is mirrored.", + "items": { + "type": "string" + }, + "type": "array" + }, + "cidrRanges": { + "description": "IP CIDR ranges that apply as filter on the source (ingress) or destination (egress) IP in the IP header. Only IPv4 is supported. If no ranges are specified, all traffic that matches the specified IPProtocols is mirrored. If neither cidrRanges nor IPProtocols is specified, all traffic is mirrored.", + "items": { + "type": "string" + }, + "type": "array" + }, + "direction": { + "description": "Direction of traffic to mirror, either INGRESS, EGRESS, or BOTH. 
The default is BOTH.", + "enum": [ + "BOTH", + "EGRESS", + "INGRESS" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "PacketMirroringForwardingRuleInfo": { + "id": "PacketMirroringForwardingRuleInfo", + "properties": { + "canonicalUrl": { + "description": "[Output Only] Unique identifier for the forwarding rule; defined by the server.", + "type": "string" + }, + "url": { + "description": "Resource URL to the forwarding rule representing the ILB configured as destination of the mirrored traffic.", + "type": "string" + } + }, + "type": "object" + }, + "PacketMirroringList": { + "description": "Contains a list of PacketMirroring resources.", + "id": "PacketMirroringList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of PacketMirroring resources.", + "items": { + "$ref": "PacketMirroring" + }, + "type": "array" + }, + "kind": { + "default": "compute#packetMirroringList", + "description": "[Output Only] Type of resource. Always compute#packetMirroring for packetMirrorings.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "PacketMirroringMirroredResourceInfo": { + "id": "PacketMirroringMirroredResourceInfo", + "properties": { + "instances": { + "description": "A set of virtual machine instances that are being mirrored. They must live in zones contained in the same region as this packetMirroring.\n\nNote that this config will apply only to those network interfaces of the Instances that belong to the network specified in this packetMirroring.\n\nYou may specify a maximum of 50 Instances.", + "items": { + "$ref": "PacketMirroringMirroredResourceInfoInstanceInfo" + }, + "type": "array" + }, + "subnetworks": { + "description": "A set of subnetworks for which traffic from/to all VM instances will be mirrored. They must live in the same region as this packetMirroring.\n\nYou may specify a maximum of 5 subnetworks.", + "items": { + "$ref": "PacketMirroringMirroredResourceInfoSubnetInfo" + }, + "type": "array" + }, + "tags": { + "description": "A set of mirrored tags. Traffic from/to all VM instances that have one or more of these tags will be mirrored.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "PacketMirroringMirroredResourceInfoInstanceInfo": { + "id": "PacketMirroringMirroredResourceInfoInstanceInfo", + "properties": { + "canonicalUrl": { + "description": "[Output Only] Unique identifier for the instance; defined by the server.", + "type": "string" + }, + "url": { + "description": "Resource URL to the virtual machine instance which is being mirrored.", + "type": "string" + } + }, + "type": "object" + }, + "PacketMirroringMirroredResourceInfoSubnetInfo": { + "id": "PacketMirroringMirroredResourceInfoSubnetInfo", + "properties": { + "canonicalUrl": { + "description": "[Output Only] Unique identifier for the subnetwork; defined by the server.", + "type": "string" + }, + "url": { + "description": "Resource URL to the subnetwork for which traffic from/to all VM instances will be mirrored.", + "type": "string" + } + }, + "type": "object" + }, + "PacketMirroringNetworkInfo": { + "id": "PacketMirroringNetworkInfo", + "properties": { + "canonicalUrl": { + "description": "[Output Only] Unique identifier for the network; defined by the server.", + "type": "string" + }, + "url": { + "description": "URL of the network resource.", + "type": "string" + } + }, + "type": "object" + }, + "PacketMirroringsScopedList": { + "id": "PacketMirroringsScopedList", + "properties": { + "packetMirrorings": { + "description": "A list of packetMirrorings contained in this scope.", + "items": { + "$ref": "PacketMirroring" + }, + "type": "array" + }, + "warning": { + "description": "Informational warning which replaces the list of packetMirrorings when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "PathMatcher": { + "description": "A matcher for the path portion of the URL. The BackendService from the longest-matched rule will serve the URL. If no rule was matched, the default service will be used.", + "id": "PathMatcher", + "properties": { + "defaultRouteAction": { + "$ref": "HttpRouteAction", + "description": "defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices.\nOnly one of defaultRouteAction or defaultUrlRedirect must be set.\nUrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a pathMatcher's defaultRouteAction." + }, + "defaultService": { + "description": "The full or partial URL to the BackendService resource. This will be used if none of the pathRules or routeRules defined by this PathMatcher are matched. 
For example, the following are all valid URLs to a BackendService resource: \n- https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService \n- compute/v1/projects/project/global/backendServices/backendService \n- global/backendServices/backendService If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if defaultRouteAction specifies any weightedBackendServices, defaultService must not be specified.\nOnly one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set.\nAuthorization requires one or more of the following Google IAM permissions on the specified resource default_service: \n- compute.backendBuckets.use \n- compute.backendServices.use", + "type": "string" + }, + "defaultUrlRedirect": { + "$ref": "HttpRedirectAction", + "description": "When none of the specified pathRules or routeRules match, the request is redirected to a URL specified by defaultUrlRedirect.\nIf defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set.\nNot supported when the URL map is bound to target gRPC proxy." + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "headerAction": { + "$ref": "HttpHeaderAction", + "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nHeaderAction specified here are applied after the matching HttpRouteRule HeaderAction and before the HeaderAction in the UrlMap \nNote that headerAction is not supported for Loadbalancers that have their loadBalancingScheme set to EXTERNAL.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." + }, + "name": { + "description": "The name to which this PathMatcher is referred by the HostRule.", + "type": "string" + }, + "pathRules": { + "description": "The list of path rules. Use this list instead of routeRules when routing based on simple path matching is all that's required. The order by which path rules are specified does not matter. Matches are always done on the longest-path-first basis.\nFor example: a pathRule with a path /a/b/c/* will match before /a/b/* irrespective of the order in which those paths appear in this list.\nWithin a given pathMatcher, only one of pathRules or routeRules must be set.", + "items": { + "$ref": "PathRule" + }, + "type": "array" + }, + "routeRules": { + "description": "The list of HTTP route rules. Use this list instead of pathRules when advanced route matching and routing actions are desired. routeRules are evaluated in order of priority, from the lowest to highest number.\nWithin a given pathMatcher, you can set only one of pathRules or routeRules.", + "items": { + "$ref": "HttpRouteRule" + }, + "type": "array" + } + }, + "type": "object" + }, + "PathRule": { + "description": "A path-matching rule for a URL. If matched, will use the specified BackendService to handle the traffic arriving at this URL.", + "id": "PathRule", + "properties": { + "paths": { + "description": "The list of path patterns to match. Each must start with / and the only place a * is allowed is at the end following a /. 
The string fed to the path matcher does not include any text after the first ? or #, and those chars are not allowed here.", + "items": { + "type": "string" + }, + "type": "array" + }, + "routeAction": { + "$ref": "HttpRouteAction", + "description": "In response to a matching path, the load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices.\nOnly one of routeAction or urlRedirect must be set.\nUrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a pathRule's routeAction." + }, + "service": { + "description": "The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendService s. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified.\nOnly one of urlRedirect, service or routeAction.weightedBackendService must be set.", + "type": "string" + }, + "urlRedirect": { + "$ref": "HttpRedirectAction", + "description": "When a path pattern is matched, the request is redirected to a URL specified by urlRedirect.\nIf urlRedirect is specified, service or routeAction must not be set.\nNot supported when the URL map is bound to target gRPC proxy." + } + }, + "type": "object" + }, + "PerInstanceConfig": { + "id": "PerInstanceConfig", + "properties": { + "fingerprint": { + "description": "Fingerprint of this per-instance config. This field can be used in optimistic locking. It is ignored when inserting a per-instance config. An up-to-date fingerprint must be provided in order to update an existing per-instance config or the field needs to be unset.", + "format": "byte", + "type": "string" + }, + "name": { + "description": "The name of a per-instance config and its corresponding instance. Serves as a merge key during UpdatePerInstanceConfigs operations, that is, if a per-instance config with the same name exists then it will be updated, otherwise a new one will be created for the VM instance with the same name. An attempt to create a per-instance config for a VM instance that either doesn't exist or is not part of the group will result in an error.", + "type": "string" + }, + "preservedState": { + "$ref": "PreservedState", + "description": "The intended preserved state for the given instance. Does not contain preserved state generated from a stateful policy." + }, + "status": { + "description": "The status of applying this per-instance config on the corresponding managed instance.", + "enum": [ + "APPLYING", + "DELETING", + "EFFECTIVE", + "NONE", + "UNAPPLIED", + "UNAPPLIED_DELETION" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "Policy": { + "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources.\n\n\n\nA `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). 
A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role.\n\nFor some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).\n\n**JSON example:**\n\n{ \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 }\n\n**YAML example:**\n\nbindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3\n\nFor a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", + "id": "Policy", + "properties": { + "auditConfigs": { + "description": "Specifies cloud audit logging configuration for this policy.", + "items": { + "$ref": "AuditConfig" + }, + "type": "array" + }, + "bindings": { + "description": "Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member.", + "items": { + "$ref": "Binding" + }, + "type": "array" + }, + "etag": { + "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy.\n\n**Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.", + "format": "byte", + "type": "string" + }, + "iamOwned": { + "description": "", + "type": "boolean" + }, + "rules": { + "description": "If more than one rule is specified, the rules are applied in the following manner: - All matching LOG rules are always applied. - If any DENY/DENY_WITH_LOG rule matches, permission is denied. Logging will be applied if one or more matching rule requires logging. - Otherwise, if any ALLOW/ALLOW_WITH_LOG rule matches, permission is granted. 
Logging will be applied if one or more matching rule requires logging. - Otherwise, if no rule applies, permission is denied.", + "items": { + "$ref": "Rule" + }, + "type": "array" + }, + "version": { + "description": "Specifies the format of the policy.\n\nValid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected.\n\nAny operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations:\n\n* Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions\n\n**Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.\n\nIf a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "PreconfiguredWafSet": { + "id": "PreconfiguredWafSet", + "properties": { + "expressionSets": { + "description": "List of entities that are currently supported for WAF rules.", + "items": { + "$ref": "WafExpressionSet" + }, + "type": "array" + } + }, + "type": "object" + }, + "PreservedState": { + "description": "Preserved state for a given instance.", + "id": "PreservedState", + "properties": { + "disks": { + "additionalProperties": { + "$ref": "PreservedStatePreservedDisk" + }, + "description": "Preserved disks defined for this instance. This map is keyed with the device names of the disks.", + "type": "object" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Preserved metadata defined for this instance.", + "type": "object" + } + }, + "type": "object" + }, + "PreservedStatePreservedDisk": { + "id": "PreservedStatePreservedDisk", + "properties": { + "autoDelete": { + "description": "These stateful disks will never be deleted during autohealing, update, instance recreate operations. This flag is used to configure if the disk should be deleted after it is no longer used by the group, e.g. when the given instance or the whole MIG is deleted. Note: disks attached in READ_ONLY mode cannot be auto-deleted.", + "enum": [ + "NEVER", + "ON_PERMANENT_INSTANCE_DELETION" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "mode": { + "description": "The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If not specified, the default is to attach the disk in READ_WRITE mode.", + "enum": [ + "READ_ONLY", + "READ_WRITE" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "source": { + "description": "The URL of the disk resource that is stateful and should be attached to the VM instance.", + "type": "string" + } + }, + "type": "object" + }, + "Project": { + "description": "Represents a Project resource.\n\nA project is used to organize resources in a Google Cloud Platform environment. For more information, read about the Resource Hierarchy. 
(== resource_for {$api_version}.projects ==)", + "id": "Project", + "properties": { + "commonInstanceMetadata": { + "$ref": "Metadata", + "description": "Metadata key/value pairs available to all instances contained in this project. See Custom metadata for more information." + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "defaultNetworkTier": { + "description": "This signifies the default network tier used for configuring resources of the project and can only take the following values: PREMIUM, STANDARD. Initially the default network tier is PREMIUM.", + "enum": [ + "PREMIUM", + "STANDARD" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "defaultServiceAccount": { + "description": "[Output Only] Default service account used by VMs running in this project.", + "type": "string" + }, + "description": { + "description": "An optional textual description of the resource.", + "type": "string" + }, + "enabledFeatures": { + "description": "Restricted features enabled for use on this project.", + "items": { + "type": "string" + }, + "type": "array" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server. This is not the project ID, and is just a unique ID used by Compute Engine to identify resources.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#project", + "description": "[Output Only] Type of the resource. Always compute#project for projects.", + "type": "string" + }, + "name": { + "description": "The project ID. For example: my-example-project. Use the project ID to make requests to Compute Engine.", + "type": "string" + }, + "quotas": { + "description": "[Output Only] Quotas assigned to this project.", + "items": { + "$ref": "Quota" + }, + "type": "array" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "usageExportLocation": { + "$ref": "UsageExportLocation", + "description": "The naming prefix for daily usage reports and the Google Cloud Storage bucket where they are stored." + }, + "xpnProjectStatus": { + "description": "[Output Only] The role this project has in a shared VPC configuration. Currently, only projects with the host role, which is specified by the value HOST, are differentiated.", + "enum": [ + "HOST", + "UNSPECIFIED_XPN_PROJECT_STATUS" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "ProjectsDisableXpnResourceRequest": { + "id": "ProjectsDisableXpnResourceRequest", + "properties": { + "xpnResource": { + "$ref": "XpnResourceId", + "description": "Service resource (a.k.a service project) ID." + } + }, + "type": "object" + }, + "ProjectsEnableXpnResourceRequest": { + "id": "ProjectsEnableXpnResourceRequest", + "properties": { + "xpnResource": { + "$ref": "XpnResourceId", + "description": "Service resource (a.k.a service project) ID." + } + }, + "type": "object" + }, + "ProjectsGetXpnResources": { + "id": "ProjectsGetXpnResources", + "properties": { + "kind": { + "default": "compute#projectsGetXpnResources", + "description": "[Output Only] Type of resource. Always compute#projectsGetXpnResources for lists of service resources (a.k.a service projects)", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. 
If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "resources": { + "description": "Service resources (a.k.a service projects) attached to this project as their shared VPC host.", + "items": { + "$ref": "XpnResourceId" + }, + "type": "array" + } + }, + "type": "object" + }, + "ProjectsListXpnHostsRequest": { + "id": "ProjectsListXpnHostsRequest", + "properties": { + "organization": { + "description": "Optional organization ID managed by Cloud Resource Manager, for which to list shared VPC host projects. If not specified, the organization will be inferred from the project.", + "type": "string" + } + }, + "type": "object" + }, + "ProjectsSetDefaultNetworkTierRequest": { + "id": "ProjectsSetDefaultNetworkTierRequest", + "properties": { + "networkTier": { + "description": "Default network tier to be set.", + "enum": [ + "PREMIUM", + "STANDARD" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "PublicAdvertisedPrefix": { + "description": "A public advertised prefix represents an aggregated IP prefix or netblock which customers bring to cloud. The IP prefix is a single unit of route advertisement and is announced globally to the internet.", + "id": "PublicAdvertisedPrefix", + "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "dnsVerificationIp": { + "description": "The IPv4 address to be used for reverse DNS verification.", + "type": "string" + }, + "fingerprint": { + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a new PublicAdvertisedPrefix. An up-to-date fingerprint must be provided in order to update the PublicAdvertisedPrefix, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve a PublicAdvertisedPrefix.", + "format": "byte", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource type. The server generates this identifier.", + "format": "uint64", + "type": "string" }, - "kind": { - "default": "compute#operationAggregatedList", - "description": "[Output Only] Type of resource. Always `compute#operationAggregatedList` for aggregated lists of operations.", + "ipCidrRange": { + "description": "The IPv4 address range, in CIDR format, represented by this public advertised prefix.", "type": "string" }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than `maxResults`, use the `nextPageToken` as a value for the query parameter `pageToken` in the next list request. Subsequent list requests will have their own `nextPageToken` to continue paging through the results.", + "kind": { + "default": "compute#publicAdvertisedPrefix", + "description": "[Output Only] Type of the resource. 
Always compute#publicAdvertisedPrefix for public advertised prefixes.", "type": "string" }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", + "name": { + "annotations": { + "required": [ + "compute.publicAdvertisedPrefixes.insert" + ] + }, + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "unreachables": { - "description": "[Output Only] Unreachable resources.", + "publicDelegatedPrefixs": { + "description": "[Output Only] The list of public delegated prefixes that exist for this public advertised prefix.", "items": { - "type": "string" + "$ref": "PublicAdvertisedPrefixPublicDelegatedPrefix" }, "type": "array" }, - "warning": { - "description": "[Output Only] Informational warning message.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "LARGE_DEPLOYMENT_WARNING", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "PARTIAL_SUCCESS", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "sharedSecret": { + "description": "[Output Only] The shared secret to be used for reverse DNS verification.", + "type": "string" + }, + "status": { + "description": "The status of the public advertised prefix.", + "enum": [ + "INITIAL", + "PREFIX_CONFIGURATION_COMPLETE", + "PREFIX_CONFIGURATION_IN_PROGRESS", + "PREFIX_REMOVAL_IN_PROGRESS", + "PTR_CONFIGURED", + "REVERSE_DNS_LOOKUP_FAILED", + "VALIDATED" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" } }, "type": "object" }, - "OperationList": { - "description": "Contains a list of Operation resources.", - "id": "OperationList", + "PublicAdvertisedPrefixList": { + "id": "PublicAdvertisedPrefixList", "properties": { "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "[Output Only] A list of Operation resources.", + "description": "A list of PublicAdvertisedPrefix resources.", "items": { - "$ref": "Operation" + "$ref": "PublicAdvertisedPrefix" }, "type": "array" }, "kind": { - "default": "compute#operationList", - "description": "[Output Only] Type of resource. Always `compute#operations` for Operations resource.", + "default": "compute#publicAdvertisedPrefixList", + "description": "[Output Only] Type of the resource. Always compute#publicAdvertisedPrefix for public advertised prefixes.", "type": "string" }, "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than `maxResults`, use the `nextPageToken` as a value for the query parameter `pageToken` in the next list request. Subsequent list requests will have their own `nextPageToken` to continue paging through the results.", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", "type": "string" }, "selfLink": { @@ -43310,172 +45984,37 @@ }, "type": "object" }, - "OperationsScopedList": { - "id": "OperationsScopedList", - "properties": { - "operations": { - "description": "[Output Only] A list of operations contained in this scope.", - "items": { - "$ref": "Operation" - }, - "type": "array" - }, - "warning": { - "description": "[Output Only] Informational warning which replaces the list of operations when the list is empty.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "LARGE_DEPLOYMENT_WARNING", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "PARTIAL_SUCCESS", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "OutlierDetection": { - "description": "Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service.", - "id": "OutlierDetection", + "PublicAdvertisedPrefixPublicDelegatedPrefix": { + "description": "Represents a CIDR range which can be used to assign addresses.", + "id": "PublicAdvertisedPrefixPublicDelegatedPrefix", "properties": { - "baseEjectionTime": { - "$ref": "Duration", - "description": "The base time that a host is ejected for. The real ejection time is equal to the base ejection time multiplied by the number of times the host has been ejected. Defaults to 30000ms or 30s." - }, - "consecutiveErrors": { - "description": "Number of errors before a host is ejected from the connection pool. When the backend host is accessed over HTTP, a 5xx return code qualifies as an error. Defaults to 5.", - "format": "int32", - "type": "integer" - }, - "consecutiveGatewayFailure": { - "description": "The number of consecutive gateway failures (502, 503, 504 status or connection errors that are mapped to one of those status codes) before a consecutive gateway failure ejection occurs. Defaults to 3.", - "format": "int32", - "type": "integer" - }, - "enforcingConsecutiveErrors": { - "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive 5xx. 
This setting can be used to disable ejection or to ramp it up slowly. Defaults to 0.", - "format": "int32", - "type": "integer" - }, - "enforcingConsecutiveGatewayFailure": { - "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive gateway failures. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.", - "format": "int32", - "type": "integer" - }, - "enforcingSuccessRate": { - "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through success rate statistics. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.", - "format": "int32", - "type": "integer" - }, - "interval": { - "$ref": "Duration", - "description": "Time interval between ejection analysis sweeps. This can result in both new ejections as well as hosts being returned to service. Defaults to 1 second." + "ipRange": { + "description": "The IP address range of the public delegated prefix", + "type": "string" }, - "maxEjectionPercent": { - "description": "Maximum percentage of hosts in the load balancing pool for the backend service that can be ejected. Defaults to 50%.", - "format": "int32", - "type": "integer" + "name": { + "description": "The name of the public delegated prefix", + "type": "string" }, - "successRateMinimumHosts": { - "description": "The number of hosts in a cluster that must have enough request volume to detect success rate outliers. If the number of hosts is less than this setting, outlier detection via success rate statistics is not performed for any host in the cluster. Defaults to 5.", - "format": "int32", - "type": "integer" + "project": { + "description": "The project number of the public delegated prefix", + "type": "string" }, - "successRateRequestVolume": { - "description": "The minimum number of total requests that must be collected in one interval (as defined by the interval duration above) to include this host in success rate based outlier detection. If the volume is lower than this setting, outlier detection via success rate statistics is not performed for that host. Defaults to 100.", - "format": "int32", - "type": "integer" + "region": { + "description": "The region of the public delegated prefix if it is regional. If absent, the prefix is global.", + "type": "string" }, - "successRateStdevFactor": { - "description": "This factor is used to determine the ejection threshold for success rate outlier ejection. The ejection threshold is the difference between the mean success rate, and the product of this factor and the standard deviation of the mean success rate: mean - (stdev * success_rate_stdev_factor). This factor is divided by a thousand to get a double. That is, if the desired factor is 1.9, the runtime value should be 1900. Defaults to 1900.", - "format": "int32", - "type": "integer" + "status": { + "description": "The status of the public delegated prefix. Possible values are: INITIALIZING: The public delegated prefix is being initialized and addresses cannot be created yet. ANNOUNCED: The public delegated prefix is active.", + "type": "string" } }, "type": "object" }, - "PacketMirroring": { - "description": "Represents a Packet Mirroring resource.\n\nPacket Mirroring clones the traffic of specified instances in your Virtual Private Cloud (VPC) network and forwards it to a collector destination, such as an instance group of an internal TCP/UDP load balancer, for analysis or examination. 
For more information about setting up Packet Mirroring, see Using Packet Mirroring. (== resource_for {$api_version}.packetMirrorings ==)", - "id": "PacketMirroring", + "PublicDelegatedPrefix": { + "description": "A PublicDelegatedPrefix resource represents an IP block within a PublicAdvertisedPrefix that is configured within a single cloud scope (global or region). IPs in the block can be allocated to resources within that scope. Public delegated prefixes may be further broken up into smaller IP blocks in the same scope as the parent block.", + "id": "PublicDelegatedPrefix", "properties": { - "collectorIlb": { - "$ref": "PacketMirroringForwardingRuleInfo", - "description": "The Forwarding Rule resource of type loadBalancingScheme=INTERNAL that will be used as collector for mirrored traffic. The specified forwarding rule must have isMirroringCollector set to true." - }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -43484,74 +46023,77 @@ "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, - "enable": { - "description": "Indicates whether or not this packet mirroring takes effect. If set to FALSE, this packet mirroring policy will not be enforced on the network.\n\nThe default is TRUE.", - "enum": [ - "FALSE", - "TRUE" - ], - "enumDescriptions": [ - "", - "" - ], + "fingerprint": { + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a new PublicDelegatedPrefix. An up-to-date fingerprint must be provided in order to update the PublicDelegatedPrefix, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve a PublicDelegatedPrefix.", + "format": "byte", "type": "string" }, - "filter": { - "$ref": "PacketMirroringFilter", - "description": "Filter for mirrored traffic. If unspecified, all traffic is mirrored." - }, "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "description": "[Output Only] The unique identifier for the resource type. The server generates this identifier.", "format": "uint64", "type": "string" }, - "kind": { - "default": "compute#packetMirroring", - "description": "[Output Only] Type of the resource. Always compute#packetMirroring for packet mirrorings.", + "ipCidrRange": { + "description": "The IPv4 address range, in CIDR format, represented by this public delegated prefix.", "type": "string" }, - "mirroredResources": { - "$ref": "PacketMirroringMirroredResourceInfo", - "description": "PacketMirroring mirroredResourceInfos. MirroredResourceInfo specifies a set of mirrored VM instances, subnetworks and/or tags for which traffic from/to all VM instances will be mirrored." + "isLiveMigration": { + "description": "If true, the prefix will be live migrated.", + "type": "boolean" + }, + "kind": { + "default": "compute#publicDelegatedPrefix", + "description": "[Output Only] Type of the resource. Always compute#publicDelegatedPrefix for public delegated prefixes.", + "type": "string" }, "name": { "annotations": { "required": [ - "compute.packetMirrorings.insert" + "compute.publicDelegatedPrefixes.insert" ] }, - "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "network": { - "$ref": "PacketMirroringNetworkInfo", - "annotations": { - "required": [ - "compute.packetMirrorings.insert" - ] - }, - "description": "Specifies the mirrored VPC network. Only packets in this network will be mirrored. All mirrored VMs should have a NIC in the given network. All mirrored subnetworks should belong to the given network." + "parentPrefix": { + "description": "The URL of parent prefix. Either PublicAdvertisedPrefix or PublicDelegatedPrefix.", + "type": "string" }, - "priority": { - "description": "The priority of applying this configuration. Priority is used to break ties in cases where there is more than one matching rule. In the case of two rules that apply for a given Instance, the one with the lowest-numbered priority value wins.\n\nDefault value is 1000. Valid range is 0 through 65535.", - "format": "uint32", - "type": "integer" + "publicDelegatedSubPrefixs": { + "description": "The list of sub public delegated prefixes that exist for this public delegated prefix.", + "items": { + "$ref": "PublicDelegatedPrefixPublicDelegatedSubPrefix" + }, + "type": "array" }, "region": { - "description": "[Output Only] URI of the region where the packetMirroring resides.", + "description": "[Output Only] URL of the region where the public delegated prefix resides. This field applies only to the region resource. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", "type": "string" }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" + }, + "status": { + "description": "[Output Only] The status of the public delegated prefix.", + "enum": [ + "ANNOUNCED", + "DELETING", + "INITIALIZING" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" } }, "type": "object" }, - "PacketMirroringAggregatedList": { - "description": "Contains a list of packetMirrorings.", - "id": "PacketMirroringAggregatedList", + "PublicDelegatedPrefixAggregatedList": { + "id": "PublicDelegatedPrefixAggregatedList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", @@ -43559,15 +46101,15 @@ }, "items": { "additionalProperties": { - "$ref": "PacketMirroringsScopedList", - "description": "Name of the scope containing this set of packetMirrorings." + "$ref": "PublicDelegatedPrefixesScopedList", + "description": "[Output Only] Name of the scope containing this set of PublicDelegatedPrefixes." 
}, - "description": "A list of PacketMirroring resources.", + "description": "A list of PublicDelegatedPrefixesScopedList resources.", "type": "object" }, "kind": { - "default": "compute#packetMirroringAggregatedList", - "description": "Type of resource.", + "default": "compute#publicDelegatedPrefixAggregatedList", + "description": "[Output Only] Type of the resource. Always compute#publicDelegatedPrefixAggregatedList for aggregated lists of public delegated prefixes.", "type": "string" }, "nextPageToken": { @@ -43673,72 +46215,23 @@ }, "type": "object" }, - "PacketMirroringFilter": { - "id": "PacketMirroringFilter", - "properties": { - "IPProtocols": { - "description": "Protocols that apply as filter on mirrored traffic. If no protocols are specified, all traffic that matches the specified CIDR ranges is mirrored. If neither cidrRanges nor IPProtocols is specified, all traffic is mirrored.", - "items": { - "type": "string" - }, - "type": "array" - }, - "cidrRanges": { - "description": "IP CIDR ranges that apply as filter on the source (ingress) or destination (egress) IP in the IP header. Only IPv4 is supported. If no ranges are specified, all traffic that matches the specified IPProtocols is mirrored. If neither cidrRanges nor IPProtocols is specified, all traffic is mirrored.", - "items": { - "type": "string" - }, - "type": "array" - }, - "direction": { - "description": "Direction of traffic to mirror, either INGRESS, EGRESS, or BOTH. The default is BOTH.", - "enum": [ - "BOTH", - "EGRESS", - "INGRESS" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "PacketMirroringForwardingRuleInfo": { - "id": "PacketMirroringForwardingRuleInfo", - "properties": { - "canonicalUrl": { - "description": "[Output Only] Unique identifier for the forwarding rule; defined by the server.", - "type": "string" - }, - "url": { - "description": "Resource URL to the forwarding rule representing the ILB configured as destination of the mirrored traffic.", - "type": "string" - } - }, - "type": "object" - }, - "PacketMirroringList": { - "description": "Contains a list of PacketMirroring resources.", - "id": "PacketMirroringList", + "PublicDelegatedPrefixList": { + "id": "PublicDelegatedPrefixList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of PacketMirroring resources.", + "description": "A list of PublicDelegatedPrefix resources.", "items": { - "$ref": "PacketMirroring" + "$ref": "PublicDelegatedPrefix" }, "type": "array" }, "kind": { - "default": "compute#packetMirroringList", - "description": "[Output Only] Type of resource. Always compute#packetMirroring for packetMirrorings.", + "default": "compute#publicDelegatedPrefixList", + "description": "[Output Only] Type of the resource. Always compute#publicDelegatedPrefixList for public delegated prefixes.", "type": "string" }, "nextPageToken": { @@ -43837,87 +46330,61 @@ }, "type": "object" }, - "PacketMirroringMirroredResourceInfo": { - "id": "PacketMirroringMirroredResourceInfo", - "properties": { - "instances": { - "description": "A set of virtual machine instances that are being mirrored. 
They must live in zones contained in the same region as this packetMirroring.\n\nNote that this config will apply only to those network interfaces of the Instances that belong to the network specified in this packetMirroring.\n\nYou may specify a maximum of 50 Instances.", - "items": { - "$ref": "PacketMirroringMirroredResourceInfoInstanceInfo" - }, - "type": "array" - }, - "subnetworks": { - "description": "A set of subnetworks for which traffic from/to all VM instances will be mirrored. They must live in the same region as this packetMirroring.\n\nYou may specify a maximum of 5 subnetworks.", - "items": { - "$ref": "PacketMirroringMirroredResourceInfoSubnetInfo" - }, - "type": "array" - }, - "tags": { - "description": "A set of mirrored tags. Traffic from/to all VM instances that have one or more of these tags will be mirrored.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "PacketMirroringMirroredResourceInfoInstanceInfo": { - "id": "PacketMirroringMirroredResourceInfoInstanceInfo", + "PublicDelegatedPrefixPublicDelegatedSubPrefix": { + "description": "Represents a sub PublicDelegatedPrefix.", + "id": "PublicDelegatedPrefixPublicDelegatedSubPrefix", "properties": { - "canonicalUrl": { - "description": "[Output Only] Unique identifier for the instance; defined by the server.", + "delegateeProject": { + "description": "Name of the project scoping this PublicDelegatedSubPrefix.", "type": "string" }, - "url": { - "description": "Resource URL to the virtual machine instance which is being mirrored.", + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" - } - }, - "type": "object" - }, - "PacketMirroringMirroredResourceInfoSubnetInfo": { - "id": "PacketMirroringMirroredResourceInfoSubnetInfo", - "properties": { - "canonicalUrl": { - "description": "[Output Only] Unique identifier for the subnetwork; defined by the server.", + }, + "ipCidrRange": { + "description": "The IPv4 address range, in CIDR format, represented by this sub public delegated prefix.", "type": "string" }, - "url": { - "description": "Resource URL to the subnetwork for which traffic from/to all VM instances will be mirrored.", + "isAddress": { + "description": "Whether the sub prefix is delegated to create Address resources in the delegatee project.", + "type": "boolean" + }, + "name": { + "description": "The name of the sub public delegated prefix.", "type": "string" - } - }, - "type": "object" - }, - "PacketMirroringNetworkInfo": { - "id": "PacketMirroringNetworkInfo", - "properties": { - "canonicalUrl": { - "description": "[Output Only] Unique identifier for the network; defined by the server.", + }, + "region": { + "description": "[Output Only] The region of the sub public delegated prefix if it is regional. 
If absent, the sub prefix is global.", "type": "string" }, - "url": { - "description": "URL of the network resource.", + "status": { + "description": "[Output Only] The status of the sub public delegated prefix.", + "enum": [ + "ACTIVE", + "INACTIVE" + ], + "enumDescriptions": [ + "", + "" + ], "type": "string" } }, "type": "object" }, - "PacketMirroringsScopedList": { - "id": "PacketMirroringsScopedList", + "PublicDelegatedPrefixesScopedList": { + "id": "PublicDelegatedPrefixesScopedList", "properties": { - "packetMirrorings": { - "description": "A list of packetMirrorings contained in this scope.", + "publicDelegatedPrefixes": { + "description": "[Output Only] A list of PublicDelegatedPrefixes contained in this scope.", "items": { - "$ref": "PacketMirroring" + "$ref": "PublicDelegatedPrefix" }, "type": "array" }, "warning": { - "description": "Informational warning which replaces the list of packetMirrorings when the list is empty.", + "description": "[Output Only] Informational warning which replaces the list of public delegated prefixes when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -44004,379 +46471,6 @@ }, "type": "object" }, - "PathMatcher": { - "description": "A matcher for the path portion of the URL. The BackendService from the longest-matched rule will serve the URL. If no rule was matched, the default service will be used.", - "id": "PathMatcher", - "properties": { - "defaultRouteAction": { - "$ref": "HttpRouteAction", - "description": "defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices.\nOnly one of defaultRouteAction or defaultUrlRedirect must be set.\nUrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a pathMatcher's defaultRouteAction." - }, - "defaultService": { - "description": "The full or partial URL to the BackendService resource. This will be used if none of the pathRules or routeRules defined by this PathMatcher are matched. For example, the following are all valid URLs to a BackendService resource: \n- https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService \n- compute/v1/projects/project/global/backendServices/backendService \n- global/backendServices/backendService If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. 
Conversely, if defaultRouteAction specifies any weightedBackendServices, defaultService must not be specified.\nOnly one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set.\nAuthorization requires one or more of the following Google IAM permissions on the specified resource default_service: \n- compute.backendBuckets.use \n- compute.backendServices.use", - "type": "string" - }, - "defaultUrlRedirect": { - "$ref": "HttpRedirectAction", - "description": "When none of the specified pathRules or routeRules match, the request is redirected to a URL specified by defaultUrlRedirect.\nIf defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set.\nNot supported when the URL map is bound to target gRPC proxy." - }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" - }, - "headerAction": { - "$ref": "HttpHeaderAction", - "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nHeaderAction specified here are applied after the matching HttpRouteRule HeaderAction and before the HeaderAction in the UrlMap \nNote that headerAction is not supported for Loadbalancers that have their loadBalancingScheme set to EXTERNAL.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." - }, - "name": { - "description": "The name to which this PathMatcher is referred by the HostRule.", - "type": "string" - }, - "pathRules": { - "description": "The list of path rules. Use this list instead of routeRules when routing based on simple path matching is all that's required. The order by which path rules are specified does not matter. Matches are always done on the longest-path-first basis.\nFor example: a pathRule with a path /a/b/c/* will match before /a/b/* irrespective of the order in which those paths appear in this list.\nWithin a given pathMatcher, only one of pathRules or routeRules must be set.", - "items": { - "$ref": "PathRule" - }, - "type": "array" - }, - "routeRules": { - "description": "The list of HTTP route rules. Use this list instead of pathRules when advanced route matching and routing actions are desired. routeRules are evaluated in order of priority, from the lowest to highest number.\nWithin a given pathMatcher, you can set only one of pathRules or routeRules.", - "items": { - "$ref": "HttpRouteRule" - }, - "type": "array" - } - }, - "type": "object" - }, - "PathRule": { - "description": "A path-matching rule for a URL. If matched, will use the specified BackendService to handle the traffic arriving at this URL.", - "id": "PathRule", - "properties": { - "paths": { - "description": "The list of path patterns to match. Each must start with / and the only place a * is allowed is at the end following a /. The string fed to the path matcher does not include any text after the first ? or #, and those chars are not allowed here.", - "items": { - "type": "string" - }, - "type": "array" - }, - "routeAction": { - "$ref": "HttpRouteAction", - "description": "In response to a matching path, the load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. 
Conversely if service is set, routeAction cannot contain any weightedBackendServices.\nOnly one of routeAction or urlRedirect must be set.\nUrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a pathRule's routeAction." - }, - "service": { - "description": "The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendService s. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified.\nOnly one of urlRedirect, service or routeAction.weightedBackendService must be set.", - "type": "string" - }, - "urlRedirect": { - "$ref": "HttpRedirectAction", - "description": "When a path pattern is matched, the request is redirected to a URL specified by urlRedirect.\nIf urlRedirect is specified, service or routeAction must not be set.\nNot supported when the URL map is bound to target gRPC proxy." - } - }, - "type": "object" - }, - "PerInstanceConfig": { - "id": "PerInstanceConfig", - "properties": { - "fingerprint": { - "description": "Fingerprint of this per-instance config. This field can be used in optimistic locking. It is ignored when inserting a per-instance config. An up-to-date fingerprint must be provided in order to update an existing per-instance config or the field needs to be unset.", - "format": "byte", - "type": "string" - }, - "name": { - "description": "The name of a per-instance config and its corresponding instance. Serves as a merge key during UpdatePerInstanceConfigs operations, that is, if a per-instance config with the same name exists then it will be updated, otherwise a new one will be created for the VM instance with the same name. An attempt to create a per-instance config for a VM instance that either doesn't exist or is not part of the group will result in an error.", - "type": "string" - }, - "preservedState": { - "$ref": "PreservedState", - "description": "The intended preserved state for the given instance. Does not contain preserved state generated from a stateful policy." - }, - "status": { - "description": "The status of applying this per-instance config on the corresponding managed instance.", - "enum": [ - "APPLYING", - "DELETING", - "EFFECTIVE", - "NONE", - "UNAPPLIED", - "UNAPPLIED_DELETION" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "Policy": { - "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources.\n\n\n\nA `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role.\n\nFor some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. 
To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).\n\n**JSON example:**\n\n{ \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 }\n\n**YAML example:**\n\nbindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3\n\nFor a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", - "id": "Policy", - "properties": { - "auditConfigs": { - "description": "Specifies cloud audit logging configuration for this policy.", - "items": { - "$ref": "AuditConfig" - }, - "type": "array" - }, - "bindings": { - "description": "Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member.", - "items": { - "$ref": "Binding" - }, - "type": "array" - }, - "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy.\n\n**Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.", - "format": "byte", - "type": "string" - }, - "iamOwned": { - "description": "", - "type": "boolean" - }, - "rules": { - "description": "If more than one rule is specified, the rules are applied in the following manner: - All matching LOG rules are always applied. - If any DENY/DENY_WITH_LOG rule matches, permission is denied. Logging will be applied if one or more matching rule requires logging. - Otherwise, if any ALLOW/ALLOW_WITH_LOG rule matches, permission is granted. Logging will be applied if one or more matching rule requires logging. - Otherwise, if no rule applies, permission is denied.", - "items": { - "$ref": "Rule" - }, - "type": "array" - }, - "version": { - "description": "Specifies the format of the policy.\n\nValid values are `0`, `1`, and `3`. 
Requests that specify an invalid value are rejected.\n\nAny operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations:\n\n* Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions\n\n**Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.\n\nIf a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.\n\nTo learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "PreconfiguredWafSet": { - "id": "PreconfiguredWafSet", - "properties": { - "expressionSets": { - "description": "List of entities that are currently supported for WAF rules.", - "items": { - "$ref": "WafExpressionSet" - }, - "type": "array" - } - }, - "type": "object" - }, - "PreservedState": { - "description": "Preserved state for a given instance.", - "id": "PreservedState", - "properties": { - "disks": { - "additionalProperties": { - "$ref": "PreservedStatePreservedDisk" - }, - "description": "Preserved disks defined for this instance. This map is keyed with the device names of the disks.", - "type": "object" - }, - "metadata": { - "additionalProperties": { - "type": "string" - }, - "description": "Preserved metadata defined for this instance.", - "type": "object" - } - }, - "type": "object" - }, - "PreservedStatePreservedDisk": { - "id": "PreservedStatePreservedDisk", - "properties": { - "autoDelete": { - "description": "These stateful disks will never be deleted during autohealing, update, instance recreate operations. This flag is used to configure if the disk should be deleted after it is no longer used by the group, e.g. when the given instance or the whole MIG is deleted. Note: disks attached in READ_ONLY mode cannot be auto-deleted.", - "enum": [ - "NEVER", - "ON_PERMANENT_INSTANCE_DELETION" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - }, - "mode": { - "description": "The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If not specified, the default is to attach the disk in READ_WRITE mode.", - "enum": [ - "READ_ONLY", - "READ_WRITE" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - }, - "source": { - "description": "The URL of the disk resource that is stateful and should be attached to the VM instance.", - "type": "string" - } - }, - "type": "object" - }, - "Project": { - "description": "Represents a Project resource.\n\nA project is used to organize resources in a Google Cloud Platform environment. For more information, read about the Resource Hierarchy. (== resource_for {$api_version}.projects ==)", - "id": "Project", - "properties": { - "commonInstanceMetadata": { - "$ref": "Metadata", - "description": "Metadata key/value pairs available to all instances contained in this project. See Custom metadata for more information." 
- }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "defaultNetworkTier": { - "description": "This signifies the default network tier used for configuring resources of the project and can only take the following values: PREMIUM, STANDARD. Initially the default network tier is PREMIUM.", - "enum": [ - "PREMIUM", - "STANDARD" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - }, - "defaultServiceAccount": { - "description": "[Output Only] Default service account used by VMs running in this project.", - "type": "string" - }, - "description": { - "description": "An optional textual description of the resource.", - "type": "string" - }, - "enabledFeatures": { - "description": "Restricted features enabled for use on this project.", - "items": { - "type": "string" - }, - "type": "array" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server. This is not the project ID, and is just a unique ID used by Compute Engine to identify resources.", - "format": "uint64", - "type": "string" - }, - "kind": { - "default": "compute#project", - "description": "[Output Only] Type of the resource. Always compute#project for projects.", - "type": "string" - }, - "name": { - "description": "The project ID. For example: my-example-project. Use the project ID to make requests to Compute Engine.", - "type": "string" - }, - "quotas": { - "description": "[Output Only] Quotas assigned to this project.", - "items": { - "$ref": "Quota" - }, - "type": "array" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", - "type": "string" - }, - "usageExportLocation": { - "$ref": "UsageExportLocation", - "description": "The naming prefix for daily usage reports and the Google Cloud Storage bucket where they are stored." - }, - "xpnProjectStatus": { - "description": "[Output Only] The role this project has in a shared VPC configuration. Currently, only projects with the host role, which is specified by the value HOST, are differentiated.", - "enum": [ - "HOST", - "UNSPECIFIED_XPN_PROJECT_STATUS" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ProjectsDisableXpnResourceRequest": { - "id": "ProjectsDisableXpnResourceRequest", - "properties": { - "xpnResource": { - "$ref": "XpnResourceId", - "description": "Service resource (a.k.a service project) ID." - } - }, - "type": "object" - }, - "ProjectsEnableXpnResourceRequest": { - "id": "ProjectsEnableXpnResourceRequest", - "properties": { - "xpnResource": { - "$ref": "XpnResourceId", - "description": "Service resource (a.k.a service project) ID." - } - }, - "type": "object" - }, - "ProjectsGetXpnResources": { - "id": "ProjectsGetXpnResources", - "properties": { - "kind": { - "default": "compute#projectsGetXpnResources", - "description": "[Output Only] Type of resource. Always compute#projectsGetXpnResources for lists of service resources (a.k.a service projects)", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "resources": { - "description": "Service resources (a.k.a service projects) attached to this project as their shared VPC host.", - "items": { - "$ref": "XpnResourceId" - }, - "type": "array" - } - }, - "type": "object" - }, - "ProjectsListXpnHostsRequest": { - "id": "ProjectsListXpnHostsRequest", - "properties": { - "organization": { - "description": "Optional organization ID managed by Cloud Resource Manager, for which to list shared VPC host projects. If not specified, the organization will be inferred from the project.", - "type": "string" - } - }, - "type": "object" - }, - "ProjectsSetDefaultNetworkTierRequest": { - "id": "ProjectsSetDefaultNetworkTierRequest", - "properties": { - "networkTier": { - "description": "Default network tier to be set.", - "enum": [ - "PREMIUM", - "STANDARD" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, "Quota": { "description": "A quotas entry.", "id": "Quota", @@ -44394,11 +46488,14 @@ "AUTOSCALERS", "BACKEND_BUCKETS", "BACKEND_SERVICES", + "C2D_CPUS", "C2_CPUS", "COMMITMENTS", "COMMITTED_A2_CPUS", + "COMMITTED_C2D_CPUS", "COMMITTED_C2_CPUS", "COMMITTED_CPUS", + "COMMITTED_E2_CPUS", "COMMITTED_LICENSES", "COMMITTED_LOCAL_SSD_TOTAL_GB", "COMMITTED_MEMORY_OPTIMIZED_CPUS", @@ -44610,6 +46707,9 @@ "", "", "", + "", + "", + "", "" ], "type": "string" @@ -45908,6 +48008,10 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "satisfiesPzs": { + "description": "[Output Only] Reserved for future use.", + "type": "boolean" + }, "selfLink": { "description": "[Output Only] Server-defined fully-qualified URL for this resource.", "type": "string" @@ -46489,6 +48593,10 @@ "format": "uint64", "type": "string" }, + "instanceSchedulePolicy": { + "$ref": "ResourcePolicyInstanceSchedulePolicy", + "description": "Resource policy for scheduling instance operations." + }, "kind": { "default": "compute#resourcePolicy", "description": "[Output Only] Type of the resource. Always compute#resource_policies for resource policies.", @@ -46507,6 +48615,10 @@ "region": { "type": "string" }, + "resourceStatus": { + "$ref": "ResourcePolicyResourceStatus", + "description": "[Output Only] The system status of the resource policy." + }, "selfLink": { "description": "[Output Only] Server-defined fully-qualified URL for this resource.", "type": "string" @@ -46520,6 +48632,7 @@ "enum": [ "CREATING", "DELETING", + "EXPIRED", "INVALID", "READY" ], @@ -46527,6 +48640,7 @@ "", "", "", + "", "" ], "type": "string" @@ -46730,6 +48844,44 @@ }, "type": "object" }, + "ResourcePolicyInstanceSchedulePolicy": { + "description": "An InstanceSchedulePolicy specifies when and how frequent certain operations are performed on the instance.", + "id": "ResourcePolicyInstanceSchedulePolicy", + "properties": { + "expirationTime": { + "description": "The expiration time of the schedule. The timestamp is an RFC3339 string.", + "type": "string" + }, + "startTime": { + "description": "The start time of the schedule. The timestamp is an RFC3339 string.", + "type": "string" + }, + "timeZone": { + "description": "Specifies the time zone to be used in interpreting Schedule.schedule. 
The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.", + "type": "string" + }, + "vmStartSchedule": { + "$ref": "ResourcePolicyInstanceSchedulePolicySchedule", + "description": "Specifies the schedule for starting instances." + }, + "vmStopSchedule": { + "$ref": "ResourcePolicyInstanceSchedulePolicySchedule", + "description": "Specifies the schedule for stopping instances." + } + }, + "type": "object" + }, + "ResourcePolicyInstanceSchedulePolicySchedule": { + "description": "Schedule for an instance operation.", + "id": "ResourcePolicyInstanceSchedulePolicySchedule", + "properties": { + "schedule": { + "description": "Specifies the frequency for the operation, using the unix-cron format.", + "type": "string" + } + }, + "type": "object" + }, "ResourcePolicyList": { "id": "ResourcePolicyList", "properties": { @@ -46848,6 +49000,31 @@ }, "type": "object" }, + "ResourcePolicyResourceStatus": { + "description": "Contains output only fields. Use this sub-message for all output fields set on ResourcePolicy. The internal structure of this \"status\" field should mimic the structure of ResourcePolicy proto specification.", + "id": "ResourcePolicyResourceStatus", + "properties": { + "instanceSchedulePolicy": { + "$ref": "ResourcePolicyResourceStatusInstanceSchedulePolicyStatus", + "description": "[Output Only] Specifies a set of output values reffering to the instance_schedule_policy system status. This field should have the same name as corresponding policy field." + } + }, + "type": "object" + }, + "ResourcePolicyResourceStatusInstanceSchedulePolicyStatus": { + "id": "ResourcePolicyResourceStatusInstanceSchedulePolicyStatus", + "properties": { + "lastRunStartTime": { + "description": "[Output Only] The last time the schedule successfully ran. The timestamp is an RFC3339 string.", + "type": "string" + }, + "nextRunStartTime": { + "description": "[Output Only] The next time the schedule is planned to run. The actual time might be slightly different. The timestamp is an RFC3339 string.", + "type": "string" + } + }, + "type": "object" + }, "ResourcePolicySnapshotSchedulePolicy": { "description": "A snapshot schedule policy specifies when and how frequently snapshots are to be created for the target disk. Also specifies how many and how long these scheduled snapshots should be retained.", "id": "ResourcePolicySnapshotSchedulePolicy", @@ -47324,6 +49501,10 @@ "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, + "encryptedInterconnectRouter": { + "description": "Field to indicate if a router is dedicated to use with encrypted Interconnect Attachment (IPsec-encrypted Cloud Interconnect feature).\nNot currently available in all Interconnect locations.", + "type": "boolean" + }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", @@ -47577,7 +49758,7 @@ "type": "string" }, "advertisedGroups": { - "description": "User-specified list of prefix groups to advertise in custom mode, which can take one of the following options: \n- ALL_SUBNETS: Advertises all available subnets, including peer VPC subnets. \n- ALL_VPC_SUBNETS: Advertises the router's own VPC subnets. \n- ALL_PEER_VPC_SUBNETS: Advertises peer subnets of the router's VPC network. Note that this field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in the \"bgp\" message). 
These groups are advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups.", + "description": "User-specified list of prefix groups to advertise in custom mode, which can take one of the following options: \n- ALL_SUBNETS: Advertises all available subnets, including peer VPC subnets. \n- ALL_VPC_SUBNETS: Advertises the router's own VPC subnets. Note that this field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in the \"bgp\" message). These groups are advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups.", "items": { "enum": [ "ALL_SUBNETS" @@ -48344,8 +50525,38 @@ }, "type": "object" }, + "ScalingScheduleStatus": { + "id": "ScalingScheduleStatus", + "properties": { + "lastStartTime": { + "description": "[Output Only] The last time the scaling schedule became active. Note: this is a timestamp when a schedule actually became active, not when it was planned to do so. The timestamp is in RFC3339 text format.", + "type": "string" + }, + "nextStartTime": { + "description": "[Output Only] The next time the scaling schedule is to become active. Note: this is a timestamp when a schedule is planned to run, but the actual time might be slightly different. The timestamp is in RFC3339 text format.", + "type": "string" + }, + "state": { + "description": "[Output Only] The current state of a scaling schedule.", + "enum": [ + "ACTIVE", + "DISABLED", + "OBSOLETE", + "READY" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "Scheduling": { - "description": "Sets the scheduling options for an Instance. NextID: 13", + "description": "Sets the scheduling options for an Instance. NextID: 20", "id": "Scheduling", "properties": { "automaticRestart": { @@ -48624,7 +50835,7 @@ "id": "SecurityPolicyRule", "properties": { "action": { - "description": "The Action to preform when the client connection triggers the rule. Can currently be either \"allow\" or \"deny()\" where valid values for status are 403, 404, and 502.", + "description": "The Action to perform when the client connection triggers the rule. Can currently be either \"allow\" or \"deny()\" where valid values for status are 403, 404, and 502.", "type": "string" }, "description": { @@ -50001,7 +52212,7 @@ "type": "string" }, "ipCidrRange": { - "description": "The range of internal addresses that are owned by this subnetwork. Provide this property when you create the subnetwork. For example, 10.0.0.0/8 or 100.64.0.0/10. Ranges must be unique and non-overlapping within a network. Only IPv4 is supported. This field is set at resource creation time. This may be a RFC 1918 IP range, or a privately routed, non-RFC 1918 IP range, not belonging to Google. The range can be expanded after creation using expandIpCidrRange.", + "description": "The range of internal addresses that are owned by this subnetwork. Provide this property when you create the subnetwork. For example, 10.0.0.0/8 or 100.64.0.0/10. Ranges must be unique and non-overlapping within a network. Only IPv4 is supported. This field is set at resource creation time. The range can be any range listed in the Valid ranges list. The range can be expanded after creation using expandIpCidrRange.", "type": "string" }, "ipv6CidrRange": { @@ -50377,7 +52588,7 @@ "type": "number" }, "metadata": { - "description": "Can only be specified if VPC flow logs for this subnetwork is enabled. 
Configures whether all, none or a subset of metadata fields should be added to the reported VPC flow logs. Default is INCLUDE_ALL_METADATA.", + "description": "Can only be specified if VPC flow logs for this subnetwork is enabled. Configures whether all, none or a subset of metadata fields should be added to the reported VPC flow logs. Default is EXCLUDE_ALL_METADATA.", "enum": [ "CUSTOM_METADATA", "EXCLUDE_ALL_METADATA", @@ -50405,7 +52616,7 @@ "id": "SubnetworkSecondaryRange", "properties": { "ipCidrRange": { - "description": "The range of IP addresses belonging to this subnetwork secondary range. Provide this property when you create the subnetwork. Ranges must be unique and non-overlapping with all primary and secondary IP ranges within a network. Only IPv4 is supported. This may be a RFC 1918 IP range, or a privately, non-RFC 1918 IP range, not belonging to Google.", + "description": "The range of IP addresses belonging to this subnetwork secondary range. Provide this property when you create the subnetwork. Ranges must be unique and non-overlapping with all primary and secondary IP ranges within a network. Only IPv4 is supported. The range can be any range listed in the Valid ranges list.", "type": "string" }, "rangeName": { @@ -51221,6 +53432,11 @@ "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, + "fingerprint": { + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a TargetHttpsProxy. An up-to-date fingerprint must be provided in order to patch the TargetHttpsProxy; otherwise, the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve the TargetHttpsProxy.", + "format": "byte", + "type": "string" + }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", @@ -51974,6 +54190,7 @@ "description": "Session affinity option, must be one of the following values:\nNONE: Connections from the same client IP may go to any instance in the pool.\nCLIENT_IP: Connections from the same client IP will go to the same instance in the pool while that instance remains healthy.\nCLIENT_IP_PROTO: Connections from the same client IP with the same IP protocol will go to the same instance in the pool while that instance remains healthy.", "enum": [ "CLIENT_IP", + "CLIENT_IP_NO_DESTINATION", "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", "GENERATED_COOKIE", @@ -51988,6 +54205,7 @@ "", "", "", + "", "" ], "type": "string" @@ -52683,6 +54901,10 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "proxyBind": { + "description": "This field only applies when the forwarding rule that references this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED.\n\nWhen this field is set to true, Envoy proxies set up inbound traffic interception and bind to the IP address and port specified in the forwarding rule. This is generally useful when using Traffic Director to configure Envoy as a gateway or middle proxy (in other words, not a sidecar proxy). The Envoy proxy listens for inbound requests and handles requests when it receives them.\n\nThe default is false.", + "type": "boolean" + }, "proxyHeader": { "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. 
The default is NONE.", "enum": [ @@ -54606,6 +56828,10 @@ "format": "uint32", "type": "integer" }, + "interconnectAttachment": { + "description": "URL of the interconnect attachment resource. When the value of this field is present, the VPN Gateway will be used for IPsec-encrypted Cloud Interconnect; all Egress or Ingress traffic for this VPN Gateway interface will go through the specified interconnect attachment resource.\nNot currently available in all Interconnect locations.", + "type": "string" + }, "ipAddress": { "description": "[Output Only] The external IP address for this VPN gateway interface.", "type": "string" @@ -54813,7 +57039,7 @@ "type": "string" }, "status": { - "description": "[Output Only] The status of the VPN tunnel, which can be one of the following: \n- PROVISIONING: Resource is being allocated for the VPN tunnel. \n- WAITING_FOR_FULL_CONFIG: Waiting to receive all VPN-related configs from the user. Network, TargetVpnGateway, VpnTunnel, ForwardingRule, and Route resources are needed to setup the VPN tunnel. \n- FIRST_HANDSHAKE: Successful first handshake with the peer VPN. \n- ESTABLISHED: Secure session is successfully established with the peer VPN. \n- NETWORK_ERROR: Deprecated, replaced by NO_INCOMING_PACKETS \n- AUTHORIZATION_ERROR: Auth error (for example, bad shared secret). \n- NEGOTIATION_FAILURE: Handshake failed. \n- DEPROVISIONING: Resources are being deallocated for the VPN tunnel. \n- FAILED: Tunnel creation has failed and the tunnel is not ready to be used. \n- NO_INCOMING_PACKETS: No incoming packets from peer. \n- REJECTED: Tunnel configuration was rejected, can be result of being blacklisted. \n- ALLOCATING_RESOURCES: Cloud VPN is in the process of allocating all required resources. \n- STOPPED: Tunnel is stopped due to its Forwarding Rules being deleted for Classic VPN tunnels or the project is in frozen state. \n- PEER_IDENTITY_MISMATCH: Peer identity does not match peer IP, probably behind NAT. \n- TS_NARROWING_NOT_ALLOWED: Traffic selector narrowing not allowed for an HA-VPN tunnel.", + "description": "[Output Only] The status of the VPN tunnel, which can be one of the following: \n- PROVISIONING: Resource is being allocated for the VPN tunnel. \n- WAITING_FOR_FULL_CONFIG: Waiting to receive all VPN-related configs from the user. Network, TargetVpnGateway, VpnTunnel, ForwardingRule, and Route resources are needed to setup the VPN tunnel. \n- FIRST_HANDSHAKE: Successful first handshake with the peer VPN. \n- ESTABLISHED: Secure session is successfully established with the peer VPN. \n- NETWORK_ERROR: Deprecated, replaced by NO_INCOMING_PACKETS \n- AUTHORIZATION_ERROR: Auth error (for example, bad shared secret). \n- NEGOTIATION_FAILURE: Handshake failed. \n- DEPROVISIONING: Resources are being deallocated for the VPN tunnel. \n- FAILED: Tunnel creation has failed and the tunnel is not ready to be used. \n- NO_INCOMING_PACKETS: No incoming packets from peer. \n- REJECTED: Tunnel configuration was rejected, can be result of being denied access. \n- ALLOCATING_RESOURCES: Cloud VPN is in the process of allocating all required resources. \n- STOPPED: Tunnel is stopped due to its Forwarding Rules being deleted for Classic VPN tunnels or the project is in frozen state. \n- PEER_IDENTITY_MISMATCH: Peer identity does not match peer IP, probably behind NAT. 
\n- TS_NARROWING_NOT_ALLOWED: Traffic selector narrowing not allowed for an HA-VPN tunnel.", "enum": [ "ALLOCATING_RESOURCES", "AUTHORIZATION_ERROR", diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go index ad291d8b854..03fecd977d9 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -83,7 +83,7 @@ const mtlsBasePath = "https://compute.mtls.googleapis.com/compute/v1/" // OAuth2 scopes used by this API. const ( - // View and manage your data across Google Cloud Platform services + // See, edit, configure, and delete your Google Cloud Platform data CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" // View and manage your Google Compute Engine resources @@ -156,6 +156,7 @@ func New(client *http.Client) (*Service, error) { s.GlobalNetworkEndpointGroups = NewGlobalNetworkEndpointGroupsService(s) s.GlobalOperations = NewGlobalOperationsService(s) s.GlobalOrganizationOperations = NewGlobalOrganizationOperationsService(s) + s.GlobalPublicDelegatedPrefixes = NewGlobalPublicDelegatedPrefixesService(s) s.HealthChecks = NewHealthChecksService(s) s.HttpHealthChecks = NewHttpHealthChecksService(s) s.HttpsHealthChecks = NewHttpsHealthChecksService(s) @@ -177,6 +178,8 @@ func New(client *http.Client) (*Service, error) { s.NodeTypes = NewNodeTypesService(s) s.PacketMirrorings = NewPacketMirroringsService(s) s.Projects = NewProjectsService(s) + s.PublicAdvertisedPrefixes = NewPublicAdvertisedPrefixesService(s) + s.PublicDelegatedPrefixes = NewPublicDelegatedPrefixesService(s) s.RegionAutoscalers = NewRegionAutoscalersService(s) s.RegionBackendServices = NewRegionBackendServicesService(s) s.RegionCommitments = NewRegionCommitmentsService(s) @@ -186,6 +189,7 @@ func New(client *http.Client) (*Service, error) { s.RegionHealthChecks = NewRegionHealthChecksService(s) s.RegionInstanceGroupManagers = NewRegionInstanceGroupManagersService(s) s.RegionInstanceGroups = NewRegionInstanceGroupsService(s) + s.RegionInstances = NewRegionInstancesService(s) s.RegionNetworkEndpointGroups = NewRegionNetworkEndpointGroupsService(s) s.RegionNotificationEndpoints = NewRegionNotificationEndpointsService(s) s.RegionOperations = NewRegionOperationsService(s) @@ -256,6 +260,8 @@ type Service struct { GlobalOrganizationOperations *GlobalOrganizationOperationsService + GlobalPublicDelegatedPrefixes *GlobalPublicDelegatedPrefixesService + HealthChecks *HealthChecksService HttpHealthChecks *HttpHealthChecksService @@ -298,6 +304,10 @@ type Service struct { Projects *ProjectsService + PublicAdvertisedPrefixes *PublicAdvertisedPrefixesService + + PublicDelegatedPrefixes *PublicDelegatedPrefixesService + RegionAutoscalers *RegionAutoscalersService RegionBackendServices *RegionBackendServicesService @@ -316,6 +326,8 @@ type Service struct { RegionInstanceGroups *RegionInstanceGroupsService + RegionInstances *RegionInstancesService + RegionNetworkEndpointGroups *RegionNetworkEndpointGroupsService RegionNotificationEndpoints *RegionNotificationEndpointsService @@ -528,6 +540,15 @@ type GlobalOrganizationOperationsService struct { s *Service } +func NewGlobalPublicDelegatedPrefixesService(s *Service) *GlobalPublicDelegatedPrefixesService { + rs := &GlobalPublicDelegatedPrefixesService{s: s} + return rs +} + +type GlobalPublicDelegatedPrefixesService struct { + s *Service +} + func NewHealthChecksService(s *Service) *HealthChecksService { rs := &HealthChecksService{s: s} 
return rs @@ -717,6 +738,24 @@ type ProjectsService struct { s *Service } +func NewPublicAdvertisedPrefixesService(s *Service) *PublicAdvertisedPrefixesService { + rs := &PublicAdvertisedPrefixesService{s: s} + return rs +} + +type PublicAdvertisedPrefixesService struct { + s *Service +} + +func NewPublicDelegatedPrefixesService(s *Service) *PublicDelegatedPrefixesService { + rs := &PublicDelegatedPrefixesService{s: s} + return rs +} + +type PublicDelegatedPrefixesService struct { + s *Service +} + func NewRegionAutoscalersService(s *Service) *RegionAutoscalersService { rs := &RegionAutoscalersService{s: s} return rs @@ -798,6 +837,15 @@ type RegionInstanceGroupsService struct { s *Service } +func NewRegionInstancesService(s *Service) *RegionInstancesService { + rs := &RegionInstancesService{s: s} + return rs +} + +type RegionInstancesService struct { + s *Service +} + func NewRegionNetworkEndpointGroupsService(s *Service) *RegionNetworkEndpointGroupsService { rs := &RegionNetworkEndpointGroupsService{s: s} return rs @@ -1803,7 +1851,7 @@ type Address struct { // "STANDARD" NetworkTier string `json:"networkTier,omitempty"` - // PrefixLength: The prefix length if the resource reprensents an IP + // PrefixLength: The prefix length if the resource represents an IP // range. PrefixLength int64 `json:"prefixLength,omitempty"` @@ -1817,20 +1865,23 @@ type Address struct { // - `NAT_AUTO` for addresses that are external IP addresses // automatically reserved for Cloud NAT. // - `IPSEC_INTERCONNECT` for addresses created from a private IP range - // that are reserved for a VLAN attachment in an IPsec encrypted + // that are reserved for a VLAN attachment in an IPsec-encrypted Cloud // Interconnect configuration. These addresses are regional resources. // // Possible values: // "DNS_RESOLVER" // "GCE_ENDPOINT" + // "IPSEC_INTERCONNECT" // "NAT_AUTO" + // "PRIVATE_SERVICE_CONNECT" // "SHARED_LOADBALANCER_VIP" // "VPC_PEERING" Purpose string `json:"purpose,omitempty"` - // Region: [Output Only] The URL of the region where the regional - // address resides. This field is not applicable to global addresses. - // You must specify this field as part of the HTTP request URL. + // Region: [Output Only] The URL of the region where a regional address + // resides. For regional addresses, you must specify the region as a + // path parameter in the HTTP request URL. This field is not applicable + // to global addresses. Region string `json:"region,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. @@ -2459,6 +2510,11 @@ type AllocationSpecificSKUAllocationReservedInstanceProperties struct { // instance. The type of disk is local-ssd. LocalSsds []*AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk `json:"localSsds,omitempty"` + // LocationHint: An opaque location hint used to place the allocation + // close to other resources. This field is for use by internal tools + // that use the public API. + LocationHint string `json:"locationHint,omitempty"` + // MachineType: Specifies type of machine (name only) which has fixed // number of vCPUs and fixed amount of memory. This also includes // specifying custom machine type following @@ -2728,6 +2784,10 @@ type AttachedDiskInitializeParams struct { // "USE_EXISTING_DISK" OnUpdateAction string `json:"onUpdateAction,omitempty"` + // ProvisionedIops: Indicates how many IOPS must be provisioned for the + // disk. 
+ ProvisionedIops int64 `json:"provisionedIops,omitempty,string"` + // ResourcePolicies: Resource policies applied to this disk for // automatic snapshot creations. Specified using the full or partial // URL. For instance template, specify only the resource policy name. @@ -3027,6 +3087,10 @@ type Autoscaler struct { // resides (for autoscalers living in regional scope). Region string `json:"region,omitempty"` + // ScalingScheduleStatus: [Output Only] Status information of existing + // scaling schedules. + ScalingScheduleStatus map[string]ScalingScheduleStatus `json:"scalingScheduleStatus,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` @@ -3471,6 +3535,8 @@ type AutoscalerStatusDetails struct { // "NOT_ENOUGH_QUOTA_AVAILABLE" // "REGION_RESOURCE_STOCKOUT" // "SCALING_TARGET_DOES_NOT_EXIST" + // "SCHEDULED_INSTANCES_GREATER_THAN_AUTOSCALER_MAX" + // "SCHEDULED_INSTANCES_LESS_THAN_AUTOSCALER_MIN" // "UNKNOWN" // "UNSUPPORTED_MAX_RATE_LOAD_BALANCING_CONFIGURATION" // "ZONE_RESOURCE_STOCKOUT" @@ -3685,6 +3751,13 @@ type AutoscalingPolicy struct { ScaleInControl *AutoscalingPolicyScaleInControl `json:"scaleInControl,omitempty"` + // ScalingSchedules: Scaling schedules defined for an autoscaler. + // Multiple schedules can be set on an autoscaler, and they can overlap. + // During overlapping periods the greatest min_required_replicas of all + // scaling schedules is applied. Up to 128 scaling schedules are + // allowed. + ScalingSchedules map[string]AutoscalingPolicyScalingSchedule `json:"scalingSchedules,omitempty"` + // ForceSendFields is a list of field names (e.g. "CoolDownPeriodSec") // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -3711,6 +3784,20 @@ func (s *AutoscalingPolicy) MarshalJSON() ([]byte, error) { // AutoscalingPolicyCpuUtilization: CPU utilization policy. type AutoscalingPolicyCpuUtilization struct { + // PredictiveMethod: Indicates whether predictive autoscaling based on + // CPU metric is enabled. Valid values are: + // + // * NONE (default). No predictive method is used. The autoscaler scales + // the group to meet current demand based on real-time metrics. * + // OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability + // by monitoring daily and weekly load patterns and scaling out ahead of + // anticipated demand. + // + // Possible values: + // "NONE" + // "OPTIMIZE_AVAILABILITY" + PredictiveMethod string `json:"predictiveMethod,omitempty"` + // UtilizationTarget: The target CPU utilization that the autoscaler // maintains. Must be a float value in the range (0, 1]. If not // specified, the default is 0.6. @@ -3726,15 +3813,15 @@ type AutoscalingPolicyCpuUtilization struct { // utilization. UtilizationTarget float64 `json:"utilizationTarget,omitempty"` - // ForceSendFields is a list of field names (e.g. "UtilizationTarget") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "PredictiveMethod") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. 
"UtilizationTarget") to + // NullFields is a list of field names (e.g. "PredictiveMethod") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -3970,6 +4057,69 @@ func (s *AutoscalingPolicyScaleInControl) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// AutoscalingPolicyScalingSchedule: Scaling based on user-defined +// schedule. The message describes a single scaling schedule. A scaling +// schedule changes the minimum number of VM instances an autoscaler can +// recommend, which can trigger scaling out. +type AutoscalingPolicyScalingSchedule struct { + // Description: A description of a scaling schedule. + Description string `json:"description,omitempty"` + + // Disabled: A boolean value that specifies whether a scaling schedule + // can influence autoscaler recommendations. If set to true, then a + // scaling schedule has no effect. This field is optional, and its value + // is false by default. + Disabled bool `json:"disabled,omitempty"` + + // DurationSec: The duration of time intervals, in seconds, for which + // this scaling schedule is to run. The minimum allowed value is 300. + // This field is required. + DurationSec int64 `json:"durationSec,omitempty"` + + // MinRequiredReplicas: The minimum number of VM instances that the + // autoscaler will recommend in time intervals starting according to + // schedule. This field is required. + MinRequiredReplicas int64 `json:"minRequiredReplicas,omitempty"` + + // Schedule: The start timestamps of time intervals when this scaling + // schedule is to provide a scaling signal. This field uses the extended + // cron format (with an optional year field). The expression can + // describe a single timestamp if the optional year is set, in which + // case the scaling schedule runs once. The schedule is interpreted with + // respect to time_zone. This field is required. Note: These timestamps + // only describe when autoscaler starts providing the scaling signal. + // The VMs need additional time to become serving. + Schedule string `json:"schedule,omitempty"` + + // TimeZone: The time zone to use when interpreting the schedule. The + // value of this field must be a time zone name from the tz database: + // http://en.wikipedia.org/wiki/Tz_database. This field is assigned a + // default value of ?UTC? if left empty. + TimeZone string `json:"timeZone,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *AutoscalingPolicyScalingSchedule) MarshalJSON() ([]byte, error) { + type NoMethod AutoscalingPolicyScalingSchedule + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Backend: Message containing information of one individual backend. type Backend struct { // BalancingMode: Specifies how to determine whether the backend of a @@ -4201,6 +4351,12 @@ func (s *BackendBucket) MarshalJSON() ([]byte, error) { // BackendBucketCdnPolicy: Message containing Cloud CDN configuration // for a backend bucket. type BackendBucketCdnPolicy struct { + // BypassCacheOnRequestHeaders: Bypass the cache when the specified + // request headers are matched - e.g. Pragma or Authorization headers. + // Up to 5 headers can be specified. The cache is bypassed for all + // cdnPolicy.cacheMode settings. + BypassCacheOnRequestHeaders []*BackendBucketCdnPolicyBypassCacheOnRequestHeader `json:"bypassCacheOnRequestHeaders,omitempty"` + // CacheMode: Specifies the cache setting for all responses from this // backend. The possible values are: // @@ -4227,10 +4383,16 @@ type BackendBucketCdnPolicy struct { // "USE_ORIGIN_HEADERS" CacheMode string `json:"cacheMode,omitempty"` - // ClientTtl: Specifies a separate client (e.g. browser client) TTL, - // separate from the TTL for Cloud CDN's edge caches. Leaving this empty - // will use the same cache TTL for both Cloud CDN and the client-facing - // response. The maximum allowed value is 86400s (1 day). + // ClientTtl: Specifies a separate client (e.g. browser client) maximum + // TTL. This is used to clamp the max-age (or Expires) value sent to the + // client. With FORCE_CACHE_ALL, the lesser of client_ttl and + // default_ttl is used for the response max-age directive, along with a + // "public" directive. For cacheable content in CACHE_ALL_STATIC mode, + // client_ttl clamps the max-age from the origin (if specified), or else + // sets the response max-age directive to the lesser of the client_ttl + // and default_ttl, and also ensures a "public" cache-control directive + // is present. If a client TTL is not specified, a default value (1 + // hour) will be used. The maximum allowed value is 86400s (1 day). ClientTtl int64 `json:"clientTtl,omitempty"` // DefaultTtl: Specifies the default TTL for cached content served by @@ -4255,6 +4417,50 @@ type BackendBucketCdnPolicy struct { // the cache before the defined TTL. MaxTtl int64 `json:"maxTtl,omitempty"` + // NegativeCaching: Negative caching allows per-status code TTLs to be + // set, in order to apply fine-grained caching for common errors or + // redirects. This can reduce the load on your origin and improve + // end-user experience by reducing response latency. When the cache mode + // is set to CACHE_ALL_STATIC or USE_ORIGIN_HEADERS, negative caching + // applies to responses with the specified response code that lack any + // Cache-Control, Expires, or Pragma: no-cache directives. When the + // cache mode is set to FORCE_CACHE_ALL, negative caching applies to all + // responses with the specified response code, and override any caching + // headers. By default, Cloud CDN will apply the following default TTLs + // to these status codes: HTTP 300 (Multiple Choice), 301, 308 + // (Permanent Redirects): 10m HTTP 404 (Not Found), 410 (Gone), 451 + // (Unavailable For Legal Reasons): 120s HTTP 405 (Method Not Found), + // 421 (Misdirected Request), 501 (Not Implemented): 60s. 
These defaults + // can be overridden in negative_caching_policy. + NegativeCaching bool `json:"negativeCaching,omitempty"` + + // NegativeCachingPolicy: Sets a cache TTL for the specified HTTP status + // code. negative_caching must be enabled to configure + // negative_caching_policy. Omitting the policy and leaving + // negative_caching enabled will use Cloud CDN's default cache TTLs. + // Note that when specifying an explicit negative_caching_policy, you + // should take care to specify a cache TTL for all response codes that + // you wish to cache. Cloud CDN will not apply any default negative + // caching when a policy exists. + NegativeCachingPolicy []*BackendBucketCdnPolicyNegativeCachingPolicy `json:"negativeCachingPolicy,omitempty"` + + // RequestCoalescing: If true then Cloud CDN will combine multiple + // concurrent cache fill requests into a small number of requests to the + // origin. + RequestCoalescing bool `json:"requestCoalescing,omitempty"` + + // ServeWhileStale: Serve existing content from the cache (if available) + // when revalidating content with the origin, or when an error is + // encountered when refreshing the cache. This setting defines the + // default "max-stale" duration for any cached responses that do not + // specify a max-stale directive. Stale responses that exceed the TTL + // configured here will not be served. The default limit (max-stale) is + // 86400s (1 day), which will allow stale content to be served up to + // this limit beyond the max-age (or s-max-age) of a cached response. + // The maximum allowed value is 604800 (1 week). Set this to zero (0) to + // disable serve-while-stale. + ServeWhileStale int64 `json:"serveWhileStale,omitempty"` + // SignedUrlCacheMaxAgeSec: Maximum number of seconds the response to a // signed URL request will be considered fresh. After this time period, // the response will be revalidated before being served. Defaults to 1hr @@ -4269,7 +4475,41 @@ type BackendBucketCdnPolicy struct { // request URLs. SignedUrlKeyNames []string `json:"signedUrlKeyNames,omitempty"` - // ForceSendFields is a list of field names (e.g. "CacheMode") to + // ForceSendFields is a list of field names (e.g. + // "BypassCacheOnRequestHeaders") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "BypassCacheOnRequestHeaders") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendBucketCdnPolicy) MarshalJSON() ([]byte, error) { + type NoMethod BackendBucketCdnPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BackendBucketCdnPolicyBypassCacheOnRequestHeader: Bypass the cache +// when the specified request headers are present, e.g. Pragma or +// Authorization headers. Values are case insensitive. 
The presence of +// such a header overrides the cache_mode setting. +type BackendBucketCdnPolicyBypassCacheOnRequestHeader struct { + // HeaderName: The header field name to match on when bypassing cache. + // Values are case-insensitive. + HeaderName string `json:"headerName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HeaderName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -4277,7 +4517,7 @@ type BackendBucketCdnPolicy struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CacheMode") to include in + // NullFields is a list of field names (e.g. "HeaderName") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -4286,8 +4526,46 @@ type BackendBucketCdnPolicy struct { NullFields []string `json:"-"` } -func (s *BackendBucketCdnPolicy) MarshalJSON() ([]byte, error) { - type NoMethod BackendBucketCdnPolicy +func (s *BackendBucketCdnPolicyBypassCacheOnRequestHeader) MarshalJSON() ([]byte, error) { + type NoMethod BackendBucketCdnPolicyBypassCacheOnRequestHeader + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BackendBucketCdnPolicyNegativeCachingPolicy: Specify CDN TTLs for +// response error codes. +type BackendBucketCdnPolicyNegativeCachingPolicy struct { + // Code: The HTTP status code to define a TTL against. Only HTTP status + // codes 300, 301, 302, 307, 308, 404, 405, 410, 421, 451 and 501 are + // can be specified as values, and you cannot specify a status code more + // than once. + Code int64 `json:"code,omitempty"` + + // Ttl: The TTL (in seconds) for which to cache responses with the + // corresponding status code. The maximum allowed value is 1800s (30 + // minutes), noting that infrequently accessed objects may be evicted + // from the cache before the defined TTL. + Ttl int64 `json:"ttl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendBucketCdnPolicyNegativeCachingPolicy) MarshalJSON() ([]byte, error) { + type NoMethod BackendBucketCdnPolicyNegativeCachingPolicy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -4488,9 +4766,8 @@ type BackendService struct { // Backends: The list of backends that serve this BackendService. 
Backends []*Backend `json:"backends,omitempty"` - // CdnPolicy: Cloud CDN configuration for this BackendService. Not - // available for Internal TCP/UDP Load Balancing and Network Load - // Balancing. + // CdnPolicy: Cloud CDN configuration for this BackendService. Only + // available for external HTTP(S) Load Balancing. CdnPolicy *BackendServiceCdnPolicy `json:"cdnPolicy,omitempty"` // CircuitBreakers: Settings controlling the volume of connections to a @@ -4656,6 +4933,17 @@ type BackendService struct { // enabled, logs will be exported to Stackdriver. LogConfig *BackendServiceLogConfig `json:"logConfig,omitempty"` + // MaxStreamDuration: Specifies the default maximum duration (timeout) + // for streams to this service. Duration is computed from the beginning + // of the stream until the response has been completely processed, + // including all retries. A stream that does not complete in this + // duration is closed. + // If not specified, there will be no timeout limit, i.e. the maximum + // duration is infinite. + // This field is only allowed when the loadBalancingScheme of the + // backend service is INTERNAL_SELF_MANAGED. + MaxStreamDuration *Duration `json:"maxStreamDuration,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -4689,8 +4977,8 @@ type BackendService struct { // Port: Deprecated in favor of portName. The TCP port to connect on the // backend. The default value is 80. // - // This cannot be used if the loadBalancingScheme is INTERNAL (Internal - // TCP/UDP Load Balancing). + // Backend services for Internal TCP/UDP Load Balancing and Network Load + // Balancing require you omit port. Port int64 `json:"port,omitempty"` // PortName: A named port on a backend instance group representing the @@ -4756,7 +5044,7 @@ type BackendService struct { // // When the loadBalancingScheme is EXTERNAL: * For Network Load // Balancing, the possible values are NONE, CLIENT_IP, CLIENT_IP_PROTO, - // or CLIENT_IP_PORT_PROTO. * For all other load balancers that use + // or CLIENT_IP_PORT_PROTO. * For all other load balancers that use // loadBalancingScheme=EXTERNAL, the possible values are NONE, // CLIENT_IP, or GENERATED_COOKIE. * You can use GENERATED_COOKIE if the // protocol is HTTP, HTTP2, or HTTPS. @@ -4774,6 +5062,7 @@ type BackendService struct { // // Possible values: // "CLIENT_IP" + // "CLIENT_IP_NO_DESTINATION" // "CLIENT_IP_PORT_PROTO" // "CLIENT_IP_PROTO" // "GENERATED_COOKIE" @@ -4981,6 +5270,12 @@ func (s *BackendServiceAggregatedListWarningData) MarshalJSON() ([]byte, error) // BackendServiceCdnPolicy: Message containing Cloud CDN configuration // for a backend service. type BackendServiceCdnPolicy struct { + // BypassCacheOnRequestHeaders: Bypass the cache when the specified + // request headers are matched - e.g. Pragma or Authorization headers. + // Up to 5 headers can be specified. The cache is bypassed for all + // cdnPolicy.cacheMode settings. + BypassCacheOnRequestHeaders []*BackendServiceCdnPolicyBypassCacheOnRequestHeader `json:"bypassCacheOnRequestHeaders,omitempty"` + // CacheKeyPolicy: The CacheKeyPolicy for this CdnPolicy. CacheKeyPolicy *CacheKeyPolicy `json:"cacheKeyPolicy,omitempty"` @@ -5010,10 +5305,16 @@ type BackendServiceCdnPolicy struct { // "USE_ORIGIN_HEADERS" CacheMode string `json:"cacheMode,omitempty"` - // ClientTtl: Specifies a separate client (e.g. 
browser client) TTL, - // separate from the TTL for Cloud CDN's edge caches. Leaving this empty - // will use the same cache TTL for both Cloud CDN and the client-facing - // response. The maximum allowed value is 86400s (1 day). + // ClientTtl: Specifies a separate client (e.g. browser client) maximum + // TTL. This is used to clamp the max-age (or Expires) value sent to the + // client. With FORCE_CACHE_ALL, the lesser of client_ttl and + // default_ttl is used for the response max-age directive, along with a + // "public" directive. For cacheable content in CACHE_ALL_STATIC mode, + // client_ttl clamps the max-age from the origin (if specified), or else + // sets the response max-age directive to the lesser of the client_ttl + // and default_ttl, and also ensures a "public" cache-control directive + // is present. If a client TTL is not specified, a default value (1 + // hour) will be used. The maximum allowed value is 86400s (1 day). ClientTtl int64 `json:"clientTtl,omitempty"` // DefaultTtl: Specifies the default TTL for cached content served by @@ -5038,6 +5339,50 @@ type BackendServiceCdnPolicy struct { // the cache before the defined TTL. MaxTtl int64 `json:"maxTtl,omitempty"` + // NegativeCaching: Negative caching allows per-status code TTLs to be + // set, in order to apply fine-grained caching for common errors or + // redirects. This can reduce the load on your origin and improve + // end-user experience by reducing response latency. When the cache mode + // is set to CACHE_ALL_STATIC or USE_ORIGIN_HEADERS, negative caching + // applies to responses with the specified response code that lack any + // Cache-Control, Expires, or Pragma: no-cache directives. When the + // cache mode is set to FORCE_CACHE_ALL, negative caching applies to all + // responses with the specified response code, and override any caching + // headers. By default, Cloud CDN will apply the following default TTLs + // to these status codes: HTTP 300 (Multiple Choice), 301, 308 + // (Permanent Redirects): 10m HTTP 404 (Not Found), 410 (Gone), 451 + // (Unavailable For Legal Reasons): 120s HTTP 405 (Method Not Found), + // 421 (Misdirected Request), 501 (Not Implemented): 60s. These defaults + // can be overridden in negative_caching_policy. + NegativeCaching bool `json:"negativeCaching,omitempty"` + + // NegativeCachingPolicy: Sets a cache TTL for the specified HTTP status + // code. negative_caching must be enabled to configure + // negative_caching_policy. Omitting the policy and leaving + // negative_caching enabled will use Cloud CDN's default cache TTLs. + // Note that when specifying an explicit negative_caching_policy, you + // should take care to specify a cache TTL for all response codes that + // you wish to cache. Cloud CDN will not apply any default negative + // caching when a policy exists. + NegativeCachingPolicy []*BackendServiceCdnPolicyNegativeCachingPolicy `json:"negativeCachingPolicy,omitempty"` + + // RequestCoalescing: If true then Cloud CDN will combine multiple + // concurrent cache fill requests into a small number of requests to the + // origin. + RequestCoalescing bool `json:"requestCoalescing,omitempty"` + + // ServeWhileStale: Serve existing content from the cache (if available) + // when revalidating content with the origin, or when an error is + // encountered when refreshing the cache. This setting defines the + // default "max-stale" duration for any cached responses that do not + // specify a max-stale directive. 
Stale responses that exceed the TTL + // configured here will not be served. The default limit (max-stale) is + // 86400s (1 day), which will allow stale content to be served up to + // this limit beyond the max-age (or s-max-age) of a cached response. + // The maximum allowed value is 604800 (1 week). Set this to zero (0) to + // disable serve-while-stale. + ServeWhileStale int64 `json:"serveWhileStale,omitempty"` + // SignedUrlCacheMaxAgeSec: Maximum number of seconds the response to a // signed URL request will be considered fresh. After this time period, // the response will be revalidated before being served. Defaults to 1hr @@ -5052,7 +5397,41 @@ type BackendServiceCdnPolicy struct { // request URLs. SignedUrlKeyNames []string `json:"signedUrlKeyNames,omitempty"` - // ForceSendFields is a list of field names (e.g. "CacheKeyPolicy") to + // ForceSendFields is a list of field names (e.g. + // "BypassCacheOnRequestHeaders") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "BypassCacheOnRequestHeaders") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceCdnPolicy) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceCdnPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BackendServiceCdnPolicyBypassCacheOnRequestHeader: Bypass the cache +// when the specified request headers are present, e.g. Pragma or +// Authorization headers. Values are case insensitive. The presence of +// such a header overrides the cache_mode setting. +type BackendServiceCdnPolicyBypassCacheOnRequestHeader struct { + // HeaderName: The header field name to match on when bypassing cache. + // Values are case-insensitive. + HeaderName string `json:"headerName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HeaderName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -5060,18 +5439,55 @@ type BackendServiceCdnPolicy struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CacheKeyPolicy") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "HeaderName") to include in + // API requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *BackendServiceCdnPolicy) MarshalJSON() ([]byte, error) { - type NoMethod BackendServiceCdnPolicy +func (s *BackendServiceCdnPolicyBypassCacheOnRequestHeader) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceCdnPolicyBypassCacheOnRequestHeader + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BackendServiceCdnPolicyNegativeCachingPolicy: Specify CDN TTLs for +// response error codes. +type BackendServiceCdnPolicyNegativeCachingPolicy struct { + // Code: The HTTP status code to define a TTL against. Only HTTP status + // codes 300, 301, 302, 307, 308, 404, 405, 410, 421, 451 and 501 are + // can be specified as values, and you cannot specify a status code more + // than once. + Code int64 `json:"code,omitempty"` + + // Ttl: The TTL (in seconds) for which to cache responses with the + // corresponding status code. The maximum allowed value is 1800s (30 + // minutes), noting that infrequently accessed objects may be evicted + // from the cache before the defined TTL. + Ttl int64 `json:"ttl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceCdnPolicyNegativeCachingPolicy) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceCdnPolicyNegativeCachingPolicy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -5697,6 +6113,113 @@ func (s *Binding) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type BulkInsertInstanceResource struct { + // Count: The maximum number of instances to create. + Count int64 `json:"count,omitempty,string"` + + // InstanceProperties: The instance properties defining the VM instances + // to be created. Required if sourceInstanceTemplate is not provided. + InstanceProperties *InstanceProperties `json:"instanceProperties,omitempty"` + + // LocationPolicy: Policy for chosing target zone. + LocationPolicy *LocationPolicy `json:"locationPolicy,omitempty"` + + // MinCount: The minimum number of instances to create. If no min_count + // is specified then count is used as the default value. If min_count + // instances cannot be created, then no instances will be created and + // instances already created will be deleted. 
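// Illustrative sketch (not part of the generated client): how the new
// negative-caching and cache-bypass fields added to BackendServiceCdnPolicy
// above might be populated. The header name, status code and TTL values are
// hypothetical example values.
//
//	cdn := &BackendServiceCdnPolicy{
//		CacheMode:       "CACHE_ALL_STATIC",
//		NegativeCaching: true,
//		NegativeCachingPolicy: []*BackendServiceCdnPolicyNegativeCachingPolicy{
//			{Code: 404, Ttl: 120}, // cache 404s for two minutes (maximum is 1800s)
//		},
//		BypassCacheOnRequestHeaders: []*BackendServiceCdnPolicyBypassCacheOnRequestHeader{
//			{HeaderName: "Authorization"}, // matched case-insensitively
//		},
//		ServeWhileStale: 86400, // default max-stale of one day
//	}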
+ MinCount int64 `json:"minCount,omitempty,string"` + + // NamePattern: The string pattern used for the names of the VMs. Either + // name_pattern or per_instance_properties must be set. The pattern + // should contain one continuous sequence of placeholder hash characters + // (#) with each character corresponding to one digit of the generated + // instance name. Example: name_pattern of inst-#### will generate + // instance names such as inst-0001, inst-0002, ... . If there already + // exist instance(s) whose names match the name pattern in the same + // project and zone, then the generated instance numbers will start + // after the biggest existing number. For example, if there exists an + // instance with name inst-0050, then instance names generated using the + // pattern inst-#### will be inst-0051, inst-0052, etc. The name pattern + // placeholder #...# can contain up to 18 characters. + NamePattern string `json:"namePattern,omitempty"` + + // PerInstanceProperties: Per-instance properties to be set on + // individual instances. Keys of this map specify requested instance + // names. Can be empty if name_pattern is used. + PerInstanceProperties map[string]BulkInsertInstanceResourcePerInstanceProperties `json:"perInstanceProperties,omitempty"` + + // SourceInstanceTemplate: Specifies the instance template from which to + // create instances. You may combine sourceInstanceTemplate with + // instanceProperties to override specific values from an existing + // instance template. Bulk API follows the semantics of JSON Merge Patch + // described by RFC 7396. + // + // It can be a full or partial URL. For example, the following are all + // valid URLs to an instance template: + // - + // https://www.googleapis.com/compute/v1/projects/project/global/instanceTemplates/instanceTemplate + // + // - projects/project/global/instanceTemplates/instanceTemplate + // - global/instanceTemplates/instanceTemplate + // + // This field is optional. + SourceInstanceTemplate string `json:"sourceInstanceTemplate,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Count") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Count") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BulkInsertInstanceResource) MarshalJSON() ([]byte, error) { + type NoMethod BulkInsertInstanceResource + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BulkInsertInstanceResourcePerInstanceProperties: Per-instance +// properties to be set on individual instances. To be extended in the +// future. +type BulkInsertInstanceResourcePerInstanceProperties struct { + // Name: This field is only temporary. It will be removed. Do not use + // it. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BulkInsertInstanceResourcePerInstanceProperties) MarshalJSON() ([]byte, error) { + type NoMethod BulkInsertInstanceResourcePerInstanceProperties + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type CacheInvalidationRule struct { // Host: If set, this invalidation rule will only apply to requests with // a Host header matching host. @@ -6935,6 +7458,10 @@ type Disk struct { // list the supported values for the caller's project. PhysicalBlockSizeBytes int64 `json:"physicalBlockSizeBytes,omitempty,string"` + // ProvisionedIops: Indicates how many IOPS must be provisioned for the + // disk. + ProvisionedIops int64 `json:"provisionedIops,omitempty,string"` + // Region: [Output Only] URL of the region where the disk resides. Only // applicable for regional resources. You must specify this field as // part of the HTTP request URL. It is not settable as a field in the @@ -6972,8 +7499,13 @@ type Disk struct { // - // https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk // + // - + // https://www.googleapis.com/compute/v1/projects/project/regions/region/disks/disk + // // - projects/project/zones/zone/disks/disk + // - projects/project/regions/region/disks/disk // - zones/zone/disks/disk + // - regions/region/disks/disk SourceDisk string `json:"sourceDisk,omitempty"` // SourceDiskId: [Output Only] The unique ID of the disk used to create @@ -8333,11 +8865,21 @@ func (s *DisplayDevice) MarshalJSON() ([]byte, error) { } type DistributionPolicy struct { + // TargetShape: The distribution shape to which the group converges + // either proactively or on resize events (depending on the value set in + // updatePolicy.instanceRedistributionType). + // + // Possible values: + // "ANY" + // "BALANCED" + // "EVEN" + TargetShape string `json:"targetShape,omitempty"` + // Zones: Zones where the regional managed instance group will create // and manage its instances. Zones []*DistributionPolicyZoneConfiguration `json:"zones,omitempty"` - // ForceSendFields is a list of field names (e.g. "Zones") to + // ForceSendFields is a list of field names (e.g. "TargetShape") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -8345,10 +8887,10 @@ type DistributionPolicy struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Zones") to include in API - // requests with the JSON null value. 
By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "TargetShape") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` @@ -9540,14 +10082,15 @@ type FirewallPolicy struct { // property when you create the resource. Description string `json:"description,omitempty"` - // DisplayName: User-provided name of the Organization firewall plicy. - // The name should be unique in the organization in which the firewall - // policy is created. The name must be 1-63 characters long, and comply - // with RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. + // DisplayName: Depreacted, please use short name instead. User-provided + // name of the Organization firewall plicy. The name should be unique in + // the organization in which the firewall policy is created. The name + // must be 1-63 characters long, and comply with RFC1035. Specifically, + // the name must be 1-63 characters long and match the regular + // expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first + // character must be a lowercase letter, and all following characters + // must be a dash, lowercase letter, or digit, except the last + // character, which cannot be a dash. DisplayName string `json:"displayName,omitempty"` // Fingerprint: Specifies a fingerprint for this resource, which is @@ -9594,6 +10137,16 @@ type FirewallPolicy struct { // with the resource id. SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + // ShortName: User-provided name of the Organization firewall plicy. The + // name should be unique in the organization in which the firewall + // policy is created. The name must be 1-63 characters long, and comply + // with RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + ShortName string `json:"shortName,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -9625,8 +10178,8 @@ type FirewallPolicyAssociation struct { // AttachmentTarget: The target that the firewall policy is attached to. AttachmentTarget string `json:"attachmentTarget,omitempty"` - // DisplayName: [Output Only] The display name of the firewall policy of - // the association. + // DisplayName: [Output Only] Deprecated, please use short name instead. + // The display name of the firewall policy of the association. 
DisplayName string `json:"displayName,omitempty"` // FirewallPolicyId: [Output Only] The firewall policy ID of the @@ -9636,6 +10189,10 @@ type FirewallPolicyAssociation struct { // Name: The name for an association. Name string `json:"name,omitempty"` + // ShortName: [Output Only] The short name of the firewall policy of the + // association. + ShortName string `json:"shortName,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -9827,8 +10384,7 @@ type FirewallPolicyRule struct { // for status are 403, 404, and 502. Action string `json:"action,omitempty"` - // Description: An optional description of this resource. Provide this - // property when you create the resource. + // Description: An optional description for this resource. Description string `json:"description,omitempty"` // Direction: The direction in which this rule applies. @@ -10087,6 +10643,9 @@ type ForwardingRule struct { // // Must be set to `0.0.0.0` when the target is targetGrpcProxy that has // validateForProxyless field set to true. + // + // For Private Service Connect forwarding rules that forward traffic to + // Google APIs, IP address must be provided. IPAddress string `json:"IPAddress,omitempty"` // IPProtocol: The IP protocol to which this rule applies. @@ -10262,6 +10821,9 @@ type ForwardingRule struct { // network that the load balanced IP should belong to for this // Forwarding Rule. If this field is not specified, the default network // will be used. + // + // For Private Service Connect forwarding rules that forward traffic to + // Google APIs, a network must be provided. Network string `json:"network,omitempty"` // NetworkTier: This signifies the networking tier used for configuring @@ -10321,6 +10883,10 @@ type ForwardingRule struct { // (/load-balancing/docs/forwarding-rule-concepts#port_specifications). Ports []string `json:"ports,omitempty"` + // PscConnectionId: [Output Only] The PSC connection id of the PSC + // Forwarding Rule. + PscConnectionId uint64 `json:"pscConnectionId,omitempty,string"` + // Region: [Output Only] URL of the region where the regional forwarding // rule resides. This field is not applicable to global forwarding // rules. You must specify this field as part of the HTTP request URL. @@ -10330,6 +10896,14 @@ type ForwardingRule struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // ServiceDirectoryRegistrations: Service Directory resources to + // register this forwarding rule with. Currently, only supports a single + // Service Directory resource. + // + // It is only supported for Internal TCP/UDP Load Balancing and Internal + // HTTP(S) Load Balancing. + ServiceDirectoryRegistrations []*ForwardingRuleServiceDirectoryRegistration `json:"serviceDirectoryRegistrations,omitempty"` + // ServiceLabel: An optional prefix to the service name for this // Forwarding Rule. If specified, the prefix is the first label of the // fully qualified service name. @@ -10361,14 +10935,6 @@ type ForwardingRule struct { // subnetwork must be specified. Subnetwork string `json:"subnetwork,omitempty"` - // Target: The URL of the target resource to receive the matched - // traffic. For regional forwarding rules, this target must be in the - // same region as the forwarding rule. For global forwarding rules, this - // target must be a global load balancing resource. The forwarded - // traffic must be of a type appropriate to the target object. 
For more - // information, see the "Target" column in Port specifications - // (/load-balancing/docs/forwarding-rule-concepts#ip_address_specificatio - // ns). Target string `json:"target,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -10742,6 +11308,49 @@ func (s *ForwardingRuleReference) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ForwardingRuleServiceDirectoryRegistration: Describes the +// auto-registration of the Forwarding Rule to Service Directory. The +// region and project of the Service Directory resource generated from +// this registration will be the same as this Forwarding Rule. +type ForwardingRuleServiceDirectoryRegistration struct { + // Namespace: Service Directory namespace to register the forwarding + // rule under. + Namespace string `json:"namespace,omitempty"` + + // Service: Service Directory service to register the forwarding rule + // under. + Service string `json:"service,omitempty"` + + // ServiceDirectoryRegion: [Optional] Service Directory region to + // register this global forwarding rule under. Default to "us-central1". + // Only used for PSC for Google APIs. All PSC for Google APIs Forwarding + // Rules on the same network should use the same Service Directory + // region. + ServiceDirectoryRegion string `json:"serviceDirectoryRegion,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Namespace") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Namespace") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ForwardingRuleServiceDirectoryRegistration) MarshalJSON() ([]byte, error) { + type NoMethod ForwardingRuleServiceDirectoryRegistration + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type ForwardingRulesScopedList struct { // ForwardingRules: A list of forwarding rules contained in this scope. ForwardingRules []*ForwardingRule `json:"forwardingRules,omitempty"` @@ -11562,11 +12171,12 @@ type HealthCheck struct { // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. + // RFC1035. For example, a name that is 1-63 characters long, matches + // the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`, and otherwise + // complies with RFC1035. 
This regular expression describes a name where + // the first character is a lowercase letter, and all following + // characters are a dash, lowercase letter, or digit, except the last + // character, which isn't a dash. Name string `json:"name,omitempty"` // Region: [Output Only] Region where the health check resides. Not @@ -12458,6 +13068,14 @@ type HealthStatus struct { // Annotations: Metadata defined as annotations for network endpoint. Annotations map[string]string `json:"annotations,omitempty"` + // ForwardingRule: URL of the forwarding rule associated with the health + // status of the instance. + ForwardingRule string `json:"forwardingRule,omitempty"` + + // ForwardingRuleIp: A forwarding rule IP address assigned to this + // instance. + ForwardingRuleIp string `json:"forwardingRuleIp,omitempty"` + // HealthState: Health state of the instance. // // Possible values: @@ -12468,7 +13086,9 @@ type HealthStatus struct { // Instance: URL of the instance resource. Instance string `json:"instance,omitempty"` - // IpAddress: A forwarding rule IP address assigned to this instance. + // IpAddress: For target pool based Network Load Balancing, it indicates + // the forwarding rule's IP address assigned to this instance. For other + // types of load balancing, the field indicates VM internal ip. IpAddress string `json:"ipAddress,omitempty"` // Port: The named port of the instance group, not necessarily the port @@ -13384,6 +14004,19 @@ type HttpRouteAction struct { // validateForProxyless field set to true. FaultInjectionPolicy *HttpFaultInjection `json:"faultInjectionPolicy,omitempty"` + // MaxStreamDuration: Specifies the maximum duration (timeout) for + // streams on the selected route. Unlike the timeout field where the + // timeout duration starts from the time the request has been fully + // processed (i.e. end-of-stream), the duration in this field is + // computed from the beginning of the stream until the response has been + // completely processed, including all retries. A stream that does not + // complete in this duration is closed. + // If not specified, will use the largest maxStreamDuration among all + // backend services associated with the route. + // This field is only allowed if the Url map is used with backend + // services with loadBalancingScheme set to INTERNAL_SELF_MANAGED. + MaxStreamDuration *Duration `json:"maxStreamDuration,omitempty"` + // RequestMirrorPolicy: Specifies the policy on how requests intended // for the route's backends are shadowed to a separate mirrored backend // service. Loadbalancer does not wait for responses from the shadow @@ -13987,6 +14620,9 @@ type Image struct { // RawDisk: The parameters of the raw disk image. RawDisk *ImageRawDisk `json:"rawDisk,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. + SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` @@ -14492,6 +15128,15 @@ type Instance struct { // Multiple interfaces are supported per instance. NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"` + // PostKeyRevocationActionType: PostKeyRevocationActionType of the + // instance. + // + // Possible values: + // "NOOP" + // "POST_KEY_REVOCATION_ACTION_TYPE_UNSPECIFIED" + // "SHUTDOWN" + PostKeyRevocationActionType string `json:"postKeyRevocationActionType,omitempty"` + // PrivateIpv6GoogleAccess: The private IPv6 google access type for the // VM. 
If not specified, use INHERIT_FROM_SUBNETWORK as default. // @@ -17741,6 +18386,15 @@ type InstanceProperties struct { // interface. NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"` + // PostKeyRevocationActionType: PostKeyRevocationActionType of the + // instance. + // + // Possible values: + // "NOOP" + // "POST_KEY_REVOCATION_ACTION_TYPE_UNSPECIFIED" + // "SHUTDOWN" + PostKeyRevocationActionType string `json:"postKeyRevocationActionType,omitempty"` + // PrivateIpv6GoogleAccess: The private IPv6 google access type for VMs. // If not specified, use INHERIT_FROM_SUBNETWORK as default. // @@ -18139,6 +18793,85 @@ func (s *InstancesAddResourcePoliciesRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type InstancesGetEffectiveFirewallsResponse struct { + // FirewallPolicys: Effective firewalls from firewall policies. + FirewallPolicys []*InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy `json:"firewallPolicys,omitempty"` + + // Firewalls: Effective firewalls on the instance. + Firewalls []*Firewall `json:"firewalls,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "FirewallPolicys") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FirewallPolicys") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InstancesGetEffectiveFirewallsResponse) MarshalJSON() ([]byte, error) { + type NoMethod InstancesGetEffectiveFirewallsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy struct { + // DisplayName: [Output Only] Deprecated, please use short name instead. + // The display name of the firewall policy. + DisplayName string `json:"displayName,omitempty"` + + // Name: [Output Only] The name of the firewall policy. + Name string `json:"name,omitempty"` + + // Rules: The rules that apply to the network. + Rules []*FirewallPolicyRule `json:"rules,omitempty"` + + // ShortName: [Output Only] The short name of the firewall policy. + ShortName string `json:"shortName,omitempty"` + + // Type: [Output Only] The type of the firewall policy. + // + // Possible values: + // "HIERARCHY" + // "UNSPECIFIED" + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DisplayName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DisplayName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy) MarshalJSON() ([]byte, error) { + type NoMethod InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstancesRemoveResourcePoliciesRequest struct { // ResourcePolicies: Resource policies to be removed from this instance. ResourcePolicies []string `json:"resourcePolicies,omitempty"` @@ -18791,6 +19524,23 @@ type InterconnectAttachment struct { // "AVAILABILITY_DOMAIN_ANY" EdgeAvailabilityDomain string `json:"edgeAvailabilityDomain,omitempty"` + // Encryption: Indicates the user-supplied encryption option of this + // interconnect attachment: + // - NONE is the default value, which means that the attachment carries + // unencrypted traffic. VMs can send traffic to, or receive traffic + // from, this type of attachment. + // - IPSEC indicates that the attachment carries only traffic encrypted + // by an IPsec device such as an HA VPN gateway. VMs cannot directly + // send traffic to, or receive traffic from, such an attachment. To use + // IPsec-encrypted Cloud Interconnect, create the attachment using this + // option. + // Not currently available in all Interconnect locations. + // + // Possible values: + // "IPSEC" + // "NONE" + Encryption string `json:"encryption,omitempty"` + // GoogleReferenceId: [Output Only] Google reference ID, to be used when // raising support tickets with Google or otherwise to debug backend // connectivity issues. [Deprecated] This field is not used. @@ -18804,6 +19554,24 @@ type InterconnectAttachment struct { // attachment's traffic will traverse through. Interconnect string `json:"interconnect,omitempty"` + // IpsecInternalAddresses: URL of addresses that have been reserved for + // the interconnect attachment, Used only for interconnect attachment + // that has the encryption option as IPSEC. The addresses must be RFC + // 1918 IP address ranges. When creating HA VPN gateway over the + // interconnect attachment, if the attachment is configured to use an + // RFC 1918 IP address, then the VPN gateway?s IP address will be + // allocated from the IP address range specified here. For example, if + // the HA VPN gateway?s interface 0 is paired to this interconnect + // attachment, then an RFC 1918 IP address for the VPN gateway interface + // 0 will be allocated from the IP address specified for this + // interconnect attachment. If this field is not specified for + // interconnect attachment that has encryption option as IPSEC, later on + // when creating HA VPN gateway on this interconnect attachment, the HA + // VPN gateway's IP address will be allocated from regional external IP + // address pool. + // Not currently available in all Interconnect locations. + IpsecInternalAddresses []string `json:"ipsecInternalAddresses,omitempty"` + // Kind: [Output Only] Type of the resource. 
Always // compute#interconnectAttachment for interconnect attachments. Kind string `json:"kind,omitempty"` @@ -20791,6 +21559,70 @@ func (s *LocalDisk) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// LocationPolicy: Configuration for location policy among multiple +// possible locations (e.g. preferences for zone selection among zones +// in a single region). +type LocationPolicy struct { + // Locations: Location configurations mapped by location name. Currently + // only zone names are supported and must be represented as valid + // internal URLs, such as zones/us-central1-a. + Locations map[string]LocationPolicyLocation `json:"locations,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Locations") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Locations") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LocationPolicy) MarshalJSON() ([]byte, error) { + type NoMethod LocationPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type LocationPolicyLocation struct { + // Preference: Preference for a given locaction: ALLOW or DENY. + // + // Possible values: + // "ALLOW" + // "DENY" + // "PREFERENCE_UNSPECIFIED" + Preference string `json:"preference,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Preference") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Preference") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LocationPolicyLocation) MarshalJSON() ([]byte, error) { + type NoMethod LocationPolicyLocation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // LogConfig: Specifies what kind of log the caller must write type LogConfig struct { // CloudAudit: Cloud audit options. @@ -22299,6 +23131,7 @@ type NetworkEndpointGroup struct { // SERVERLESS. 
// // Possible values: + // "GCE_VM_IP" // "GCE_VM_IP_PORT" // "INTERNET_FQDN_PORT" // "INTERNET_IP_PORT" @@ -23269,8 +24102,9 @@ type NetworkInterface struct { // Fingerprint: Fingerprint hash of contents stored in this network // interface. This field will be ignored when inserting an Instance or // adding a NetworkInterface. An up-to-date fingerprint must be provided - // in order to update the NetworkInterface, otherwise the request will - // fail with error 412 conditionNotMet. + // in order to update the NetworkInterface. The request will fail with + // error 400 Bad Request if the fingerprint is not provided, or 412 + // Precondition Failed if the fingerprint is out of date. Fingerprint string `json:"fingerprint,omitempty"` // Ipv6Address: [Output Only] An IPv6 internal network address for this @@ -23692,6 +24526,86 @@ func (s *NetworksAddPeeringRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type NetworksGetEffectiveFirewallsResponse struct { + // FirewallPolicys: Effective firewalls from firewall policy. + FirewallPolicys []*NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy `json:"firewallPolicys,omitempty"` + + // Firewalls: Effective firewalls on the network. + Firewalls []*Firewall `json:"firewalls,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "FirewallPolicys") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FirewallPolicys") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *NetworksGetEffectiveFirewallsResponse) MarshalJSON() ([]byte, error) { + type NoMethod NetworksGetEffectiveFirewallsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy struct { + // DisplayName: [Output Only] Deprecated, please use short name instead. + // The display name of the firewall policy. + DisplayName string `json:"displayName,omitempty"` + + // Name: [Output Only] The name of the firewall policy. + Name string `json:"name,omitempty"` + + // Rules: The rules that apply to the network. + Rules []*FirewallPolicyRule `json:"rules,omitempty"` + + // ShortName: [Output Only] The short name of the firewall policy. + ShortName string `json:"shortName,omitempty"` + + // Type: [Output Only] The type of the firewall policy. + // + // Possible values: + // "HIERARCHY" + // "NETWORK" + // "UNSPECIFIED" + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DisplayName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DisplayName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy) MarshalJSON() ([]byte, error) { + type NoMethod NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type NetworksRemovePeeringRequest struct { // Name: Name of the peering, which should conform to RFC1035. Name string `json:"name,omitempty"` @@ -23776,6 +24690,12 @@ type NodeGroup struct { // compute#nodeGroup for node group. Kind string `json:"kind,omitempty"` + // LocationHint: An opaque location hint used to place the Node close to + // other resources. This field is for use by internal tools that use the + // public API. The location hint here on the NodeGroup overrides any + // location_hint present in the NodeTemplate. + LocationHint string `json:"locationHint,omitempty"` + // MaintenancePolicy: Specifies how to handle instances when a node in // the group undergoes maintenance. Set to one of: DEFAULT, // RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is @@ -24268,6 +25188,9 @@ type NodeGroupNode struct { // NodeType: The type of this node. NodeType string `json:"nodeType,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. + SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` + // ServerBinding: Binding properties for the physical server. ServerBinding *ServerBinding `json:"serverBinding,omitempty"` @@ -26172,6 +27095,11 @@ type Operation struct { // Name: [Output Only] Name of the operation. Name string `json:"name,omitempty"` + // OperationGroupId: [Output Only] An ID that represents a group of + // operations, such as when a group of operations results from a + // `bulkInsert` API request. + OperationGroupId string `json:"operationGroupId,omitempty"` + // OperationType: [Output Only] The type of operation, such as `insert`, // `update`, or `delete`, and so on. OperationType string `json:"operationType,omitempty"` @@ -28480,260 +29408,1182 @@ func (s *ProjectsSetDefaultNetworkTierRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Quota: A quotas entry. -type Quota struct { - // Limit: [Output Only] Quota limit for this metric. - Limit float64 `json:"limit,omitempty"` - - // Metric: [Output Only] Name of the quota metric. 
- // - // Possible values: - // "A2_CPUS" - // "AFFINITY_GROUPS" - // "AUTOSCALERS" - // "BACKEND_BUCKETS" - // "BACKEND_SERVICES" - // "C2_CPUS" - // "COMMITMENTS" - // "COMMITTED_A2_CPUS" - // "COMMITTED_C2_CPUS" - // "COMMITTED_CPUS" - // "COMMITTED_LICENSES" - // "COMMITTED_LOCAL_SSD_TOTAL_GB" - // "COMMITTED_MEMORY_OPTIMIZED_CPUS" - // "COMMITTED_N2D_CPUS" - // "COMMITTED_N2_CPUS" - // "COMMITTED_NVIDIA_A100_GPUS" - // "COMMITTED_NVIDIA_K80_GPUS" - // "COMMITTED_NVIDIA_P100_GPUS" - // "COMMITTED_NVIDIA_P4_GPUS" - // "COMMITTED_NVIDIA_T4_GPUS" - // "COMMITTED_NVIDIA_V100_GPUS" - // "CPUS" - // "CPUS_ALL_REGIONS" - // "DISKS_TOTAL_GB" - // "E2_CPUS" - // "EXTERNAL_NETWORK_LB_FORWARDING_RULES" - // "EXTERNAL_PROTOCOL_FORWARDING_RULES" - // "EXTERNAL_VPN_GATEWAYS" - // "FIREWALLS" - // "FORWARDING_RULES" - // "GLOBAL_INTERNAL_ADDRESSES" - // "GPUS_ALL_REGIONS" - // "HEALTH_CHECKS" - // "IMAGES" - // "INSTANCES" - // "INSTANCE_GROUPS" - // "INSTANCE_GROUP_MANAGERS" - // "INSTANCE_TEMPLATES" - // "INTERCONNECTS" - // "INTERCONNECT_ATTACHMENTS_PER_REGION" - // "INTERCONNECT_ATTACHMENTS_TOTAL_MBPS" - // "INTERCONNECT_TOTAL_GBPS" - // "INTERNAL_ADDRESSES" - // "INTERNAL_TRAFFIC_DIRECTOR_FORWARDING_RULES" - // "IN_PLACE_SNAPSHOTS" - // "IN_USE_ADDRESSES" - // "IN_USE_BACKUP_SCHEDULES" - // "IN_USE_SNAPSHOT_SCHEDULES" - // "LOCAL_SSD_TOTAL_GB" - // "M1_CPUS" - // "M2_CPUS" - // "MACHINE_IMAGES" - // "N2D_CPUS" - // "N2_CPUS" - // "NETWORKS" - // "NETWORK_ENDPOINT_GROUPS" - // "NETWORK_FIREWALL_POLICIES" - // "NODE_GROUPS" - // "NODE_TEMPLATES" - // "NVIDIA_A100_GPUS" - // "NVIDIA_K80_GPUS" - // "NVIDIA_P100_GPUS" - // "NVIDIA_P100_VWS_GPUS" - // "NVIDIA_P4_GPUS" - // "NVIDIA_P4_VWS_GPUS" - // "NVIDIA_T4_GPUS" - // "NVIDIA_T4_VWS_GPUS" - // "NVIDIA_V100_GPUS" - // "PACKET_MIRRORINGS" - // "PD_EXTREME_TOTAL_PROVISIONED_IOPS" - // "PREEMPTIBLE_CPUS" - // "PREEMPTIBLE_LOCAL_SSD_GB" - // "PREEMPTIBLE_NVIDIA_A100_GPUS" - // "PREEMPTIBLE_NVIDIA_K80_GPUS" - // "PREEMPTIBLE_NVIDIA_P100_GPUS" - // "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS" - // "PREEMPTIBLE_NVIDIA_P4_GPUS" - // "PREEMPTIBLE_NVIDIA_P4_VWS_GPUS" - // "PREEMPTIBLE_NVIDIA_T4_GPUS" - // "PREEMPTIBLE_NVIDIA_T4_VWS_GPUS" - // "PREEMPTIBLE_NVIDIA_V100_GPUS" - // "PSC_ILB_CONSUMER_FORWARDING_RULES_PER_PRODUCER_NETWORK" - // "PUBLIC_ADVERTISED_PREFIXES" - // "PUBLIC_DELEGATED_PREFIXES" - // "REGIONAL_AUTOSCALERS" - // "REGIONAL_INSTANCE_GROUP_MANAGERS" - // "RESERVATIONS" - // "RESOURCE_POLICIES" - // "ROUTERS" - // "ROUTES" - // "SECURITY_POLICIES" - // "SECURITY_POLICY_CEVAL_RULES" - // "SECURITY_POLICY_RULES" - // "SNAPSHOTS" - // "SSD_TOTAL_GB" - // "SSL_CERTIFICATES" - // "STATIC_ADDRESSES" - // "STATIC_BYOIP_ADDRESSES" - // "SUBNETWORKS" - // "TARGET_HTTPS_PROXIES" - // "TARGET_HTTP_PROXIES" - // "TARGET_INSTANCES" - // "TARGET_POOLS" - // "TARGET_SSL_PROXIES" - // "TARGET_TCP_PROXIES" - // "TARGET_VPN_GATEWAYS" - // "URL_MAPS" - // "VPN_GATEWAYS" - // "VPN_TUNNELS" - // "XPN_SERVICE_PROJECTS" - Metric string `json:"metric,omitempty"` - - // Owner: [Output Only] Owning resource. This is the resource on which - // this quota is applied. - Owner string `json:"owner,omitempty"` - - // Usage: [Output Only] Current usage of this metric. - Usage float64 `json:"usage,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Limit") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Limit") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Quota) MarshalJSON() ([]byte, error) { - type NoMethod Quota - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -func (s *Quota) UnmarshalJSON(data []byte) error { - type NoMethod Quota - var s1 struct { - Limit gensupport.JSONFloat64 `json:"limit"` - Usage gensupport.JSONFloat64 `json:"usage"` - *NoMethod - } - s1.NoMethod = (*NoMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.Limit = float64(s1.Limit) - s.Usage = float64(s1.Usage) - return nil -} - -// Reference: Represents a reference to a resource. -type Reference struct { - // Kind: [Output Only] Type of the resource. Always compute#reference - // for references. - Kind string `json:"kind,omitempty"` - - // ReferenceType: A description of the reference type with no implied - // semantics. Possible values include: - // - MEMBER_OF - ReferenceType string `json:"referenceType,omitempty"` - - // Referrer: URL of the resource which refers to the target. - Referrer string `json:"referrer,omitempty"` - - // Target: URL of the resource to which this reference points. - Target string `json:"target,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Kind") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Reference) MarshalJSON() ([]byte, error) { - type NoMethod Reference - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Region: Represents a Region resource. -// -// A region is a geographical area where a resource is located. For more -// information, read Regions and Zones. (== resource_for -// {$api_version}.regions ==) -type Region struct { +// PublicAdvertisedPrefix: A public advertised prefix represents an +// aggregated IP prefix or netblock which customers bring to cloud. The +// IP prefix is a single unit of route advertisement and is announced +// globally to the internet. 
+type PublicAdvertisedPrefix struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` - // Deprecated -- [Output Only] The deprecation status associated with - // this region. - Deprecated *DeprecationStatus `json:"deprecated,omitempty"` - - // Description: [Output Only] Textual description of the resource. + // Description: An optional description of this resource. Provide this + // property when you create the resource. Description string `json:"description,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. + // DnsVerificationIp: The IPv4 address to be used for reverse DNS + // verification. + DnsVerificationIp string `json:"dnsVerificationIp,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a new PublicAdvertisedPrefix. An + // up-to-date fingerprint must be provided in order to update the + // PublicAdvertisedPrefix, otherwise the request will fail with error + // 412 conditionNotMet. + // + // To see the latest fingerprint, make a get() request to retrieve a + // PublicAdvertisedPrefix. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] The unique identifier for the resource type. The + // server generates this identifier. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of the resource. Always compute#region for - // regions. + // IpCidrRange: The IPv4 address range, in CIDR format, represented by + // this public advertised prefix. + IpCidrRange string `json:"ipCidrRange,omitempty"` + + // Kind: [Output Only] Type of the resource. Always + // compute#publicAdvertisedPrefix for public advertised prefixes. Kind string `json:"kind,omitempty"` - // Name: [Output Only] Name of the resource. + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // Quotas: [Output Only] Quotas assigned to this region. - Quotas []*Quota `json:"quotas,omitempty"` + // PublicDelegatedPrefixs: [Output Only] The list of public delegated + // prefixes that exist for this public advertised prefix. + PublicDelegatedPrefixs []*PublicAdvertisedPrefixPublicDelegatedPrefix `json:"publicDelegatedPrefixs,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Status: [Output Only] Status of the region, either UP or DOWN. + // SharedSecret: [Output Only] The shared secret to be used for reverse + // DNS verification. + SharedSecret string `json:"sharedSecret,omitempty"` + + // Status: The status of the public advertised prefix. 
// // Possible values: - // "DOWN" - // "UP" + // "INITIAL" + // "PREFIX_CONFIGURATION_COMPLETE" + // "PREFIX_CONFIGURATION_IN_PROGRESS" + // "PREFIX_REMOVAL_IN_PROGRESS" + // "PTR_CONFIGURED" + // "REVERSE_DNS_LOOKUP_FAILED" + // "VALIDATED" Status string `json:"status,omitempty"` - // SupportsPzs: [Output Only] Reserved for future use. - SupportsPzs bool `json:"supportsPzs,omitempty"` - - // Zones: [Output Only] A list of zones available in this region, in the - // form of resource URLs. - Zones []string `json:"zones,omitempty"` - + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *PublicAdvertisedPrefix) MarshalJSON() ([]byte, error) { + type NoMethod PublicAdvertisedPrefix + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type PublicAdvertisedPrefixList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of PublicAdvertisedPrefix resources. + Items []*PublicAdvertisedPrefix `json:"items,omitempty"` + + // Kind: [Output Only] Type of the resource. Always + // compute#publicAdvertisedPrefix for public advertised prefixes. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *PublicAdvertisedPrefixListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PublicAdvertisedPrefixList) MarshalJSON() ([]byte, error) { + type NoMethod PublicAdvertisedPrefixList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// PublicAdvertisedPrefixListWarning: [Output Only] Informational +// warning message. +type PublicAdvertisedPrefixListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*PublicAdvertisedPrefixListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PublicAdvertisedPrefixListWarning) MarshalJSON() ([]byte, error) { + type NoMethod PublicAdvertisedPrefixListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type PublicAdvertisedPrefixListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. 
For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PublicAdvertisedPrefixListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod PublicAdvertisedPrefixListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// PublicAdvertisedPrefixPublicDelegatedPrefix: Represents a CIDR range +// which can be used to assign addresses. +type PublicAdvertisedPrefixPublicDelegatedPrefix struct { + // IpRange: The IP address range of the public delegated prefix + IpRange string `json:"ipRange,omitempty"` + + // Name: The name of the public delegated prefix + Name string `json:"name,omitempty"` + + // Project: The project number of the public delegated prefix + Project string `json:"project,omitempty"` + + // Region: The region of the public delegated prefix if it is regional. + // If absent, the prefix is global. + Region string `json:"region,omitempty"` + + // Status: The status of the public delegated prefix. Possible values + // are: INITIALIZING: The public delegated prefix is being initialized + // and addresses cannot be created yet. ANNOUNCED: The public delegated + // prefix is active. + Status string `json:"status,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IpRange") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IpRange") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *PublicAdvertisedPrefixPublicDelegatedPrefix) MarshalJSON() ([]byte, error) { + type NoMethod PublicAdvertisedPrefixPublicDelegatedPrefix + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// PublicDelegatedPrefix: A PublicDelegatedPrefix resource represents an +// IP block within a PublicAdvertisedPrefix that is configured within a +// single cloud scope (global or region). IPs in the block can be +// allocated to resources within that scope. Public delegated prefixes +// may be further broken up into smaller IP blocks in the same scope as +// the parent block. +type PublicDelegatedPrefix struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a new PublicDelegatedPrefix. An + // up-to-date fingerprint must be provided in order to update the + // PublicDelegatedPrefix, otherwise the request will fail with error 412 + // conditionNotMet. + // + // To see the latest fingerprint, make a get() request to retrieve a + // PublicDelegatedPrefix. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] The unique identifier for the resource type. The + // server generates this identifier. + Id uint64 `json:"id,omitempty,string"` + + // IpCidrRange: The IPv4 address range, in CIDR format, represented by + // this public delegated prefix. + IpCidrRange string `json:"ipCidrRange,omitempty"` + + // IsLiveMigration: If true, the prefix will be live migrated. + IsLiveMigration bool `json:"isLiveMigration,omitempty"` + + // Kind: [Output Only] Type of the resource. Always + // compute#publicDelegatedPrefix for public delegated prefixes. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // ParentPrefix: The URL of parent prefix. Either PublicAdvertisedPrefix + // or PublicDelegatedPrefix. + ParentPrefix string `json:"parentPrefix,omitempty"` + + // PublicDelegatedSubPrefixs: The list of sub public delegated prefixes + // that exist for this public delegated prefix. + PublicDelegatedSubPrefixs []*PublicDelegatedPrefixPublicDelegatedSubPrefix `json:"publicDelegatedSubPrefixs,omitempty"` + + // Region: [Output Only] URL of the region where the public delegated + // prefix resides. This field applies only to the region resource. You + // must specify this field as part of the HTTP request URL. It is not + // settable as a field in the request body. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. 
+ SelfLink string `json:"selfLink,omitempty"` + + // Status: [Output Only] The status of the public delegated prefix. + // + // Possible values: + // "ANNOUNCED" + // "DELETING" + // "INITIALIZING" + Status string `json:"status,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *PublicDelegatedPrefix) MarshalJSON() ([]byte, error) { + type NoMethod PublicDelegatedPrefix + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type PublicDelegatedPrefixAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of PublicDelegatedPrefixesScopedList resources. + Items map[string]PublicDelegatedPrefixesScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of the resource. Always + // compute#publicDelegatedPrefixAggregatedList for aggregated lists of + // public delegated prefixes. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *PublicDelegatedPrefixAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PublicDelegatedPrefixAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod PublicDelegatedPrefixAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// PublicDelegatedPrefixAggregatedListWarning: [Output Only] +// Informational warning message. +type PublicDelegatedPrefixAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*PublicDelegatedPrefixAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PublicDelegatedPrefixAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod PublicDelegatedPrefixAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type PublicDelegatedPrefixAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. 
For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PublicDelegatedPrefixAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod PublicDelegatedPrefixAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type PublicDelegatedPrefixList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of PublicDelegatedPrefix resources. + Items []*PublicDelegatedPrefix `json:"items,omitempty"` + + // Kind: [Output Only] Type of the resource. Always + // compute#publicDelegatedPrefixList for public delegated prefixes. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *PublicDelegatedPrefixListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PublicDelegatedPrefixList) MarshalJSON() ([]byte, error) { + type NoMethod PublicDelegatedPrefixList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// PublicDelegatedPrefixListWarning: [Output Only] Informational warning +// message. +type PublicDelegatedPrefixListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*PublicDelegatedPrefixListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PublicDelegatedPrefixListWarning) MarshalJSON() ([]byte, error) { + type NoMethod PublicDelegatedPrefixListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type PublicDelegatedPrefixListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PublicDelegatedPrefixListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod PublicDelegatedPrefixListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// PublicDelegatedPrefixPublicDelegatedSubPrefix: Represents a sub +// PublicDelegatedPrefix. +type PublicDelegatedPrefixPublicDelegatedSubPrefix struct { + // DelegateeProject: Name of the project scoping this + // PublicDelegatedSubPrefix. + DelegateeProject string `json:"delegateeProject,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // IpCidrRange: The IPv4 address range, in CIDR format, represented by + // this sub public delegated prefix. + IpCidrRange string `json:"ipCidrRange,omitempty"` + + // IsAddress: Whether the sub prefix is delegated to create Address + // resources in the delegatee project. + IsAddress bool `json:"isAddress,omitempty"` + + // Name: The name of the sub public delegated prefix. + Name string `json:"name,omitempty"` + + // Region: [Output Only] The region of the sub public delegated prefix + // if it is regional. If absent, the sub prefix is global. + Region string `json:"region,omitempty"` + + // Status: [Output Only] The status of the sub public delegated prefix. + // + // Possible values: + // "ACTIVE" + // "INACTIVE" + Status string `json:"status,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DelegateeProject") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DelegateeProject") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. 
However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *PublicDelegatedPrefixPublicDelegatedSubPrefix) MarshalJSON() ([]byte, error) { + type NoMethod PublicDelegatedPrefixPublicDelegatedSubPrefix + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type PublicDelegatedPrefixesScopedList struct { + // PublicDelegatedPrefixes: [Output Only] A list of + // PublicDelegatedPrefixes contained in this scope. + PublicDelegatedPrefixes []*PublicDelegatedPrefix `json:"publicDelegatedPrefixes,omitempty"` + + // Warning: [Output Only] Informational warning which replaces the list + // of public delegated prefixes when the list is empty. + Warning *PublicDelegatedPrefixesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "PublicDelegatedPrefixes") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PublicDelegatedPrefixes") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *PublicDelegatedPrefixesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod PublicDelegatedPrefixesScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// PublicDelegatedPrefixesScopedListWarning: [Output Only] Informational +// warning which replaces the list of public delegated prefixes when the +// list is empty. +type PublicDelegatedPrefixesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. 
+ // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*PublicDelegatedPrefixesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PublicDelegatedPrefixesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod PublicDelegatedPrefixesScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type PublicDelegatedPrefixesScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PublicDelegatedPrefixesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod PublicDelegatedPrefixesScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Quota: A quotas entry. +type Quota struct { + // Limit: [Output Only] Quota limit for this metric. + Limit float64 `json:"limit,omitempty"` + + // Metric: [Output Only] Name of the quota metric. 
+ // + // Possible values: + // "A2_CPUS" + // "AFFINITY_GROUPS" + // "AUTOSCALERS" + // "BACKEND_BUCKETS" + // "BACKEND_SERVICES" + // "C2D_CPUS" + // "C2_CPUS" + // "COMMITMENTS" + // "COMMITTED_A2_CPUS" + // "COMMITTED_C2D_CPUS" + // "COMMITTED_C2_CPUS" + // "COMMITTED_CPUS" + // "COMMITTED_E2_CPUS" + // "COMMITTED_LICENSES" + // "COMMITTED_LOCAL_SSD_TOTAL_GB" + // "COMMITTED_MEMORY_OPTIMIZED_CPUS" + // "COMMITTED_N2D_CPUS" + // "COMMITTED_N2_CPUS" + // "COMMITTED_NVIDIA_A100_GPUS" + // "COMMITTED_NVIDIA_K80_GPUS" + // "COMMITTED_NVIDIA_P100_GPUS" + // "COMMITTED_NVIDIA_P4_GPUS" + // "COMMITTED_NVIDIA_T4_GPUS" + // "COMMITTED_NVIDIA_V100_GPUS" + // "CPUS" + // "CPUS_ALL_REGIONS" + // "DISKS_TOTAL_GB" + // "E2_CPUS" + // "EXTERNAL_NETWORK_LB_FORWARDING_RULES" + // "EXTERNAL_PROTOCOL_FORWARDING_RULES" + // "EXTERNAL_VPN_GATEWAYS" + // "FIREWALLS" + // "FORWARDING_RULES" + // "GLOBAL_INTERNAL_ADDRESSES" + // "GPUS_ALL_REGIONS" + // "HEALTH_CHECKS" + // "IMAGES" + // "INSTANCES" + // "INSTANCE_GROUPS" + // "INSTANCE_GROUP_MANAGERS" + // "INSTANCE_TEMPLATES" + // "INTERCONNECTS" + // "INTERCONNECT_ATTACHMENTS_PER_REGION" + // "INTERCONNECT_ATTACHMENTS_TOTAL_MBPS" + // "INTERCONNECT_TOTAL_GBPS" + // "INTERNAL_ADDRESSES" + // "INTERNAL_TRAFFIC_DIRECTOR_FORWARDING_RULES" + // "IN_PLACE_SNAPSHOTS" + // "IN_USE_ADDRESSES" + // "IN_USE_BACKUP_SCHEDULES" + // "IN_USE_SNAPSHOT_SCHEDULES" + // "LOCAL_SSD_TOTAL_GB" + // "M1_CPUS" + // "M2_CPUS" + // "MACHINE_IMAGES" + // "N2D_CPUS" + // "N2_CPUS" + // "NETWORKS" + // "NETWORK_ENDPOINT_GROUPS" + // "NETWORK_FIREWALL_POLICIES" + // "NODE_GROUPS" + // "NODE_TEMPLATES" + // "NVIDIA_A100_GPUS" + // "NVIDIA_K80_GPUS" + // "NVIDIA_P100_GPUS" + // "NVIDIA_P100_VWS_GPUS" + // "NVIDIA_P4_GPUS" + // "NVIDIA_P4_VWS_GPUS" + // "NVIDIA_T4_GPUS" + // "NVIDIA_T4_VWS_GPUS" + // "NVIDIA_V100_GPUS" + // "PACKET_MIRRORINGS" + // "PD_EXTREME_TOTAL_PROVISIONED_IOPS" + // "PREEMPTIBLE_CPUS" + // "PREEMPTIBLE_LOCAL_SSD_GB" + // "PREEMPTIBLE_NVIDIA_A100_GPUS" + // "PREEMPTIBLE_NVIDIA_K80_GPUS" + // "PREEMPTIBLE_NVIDIA_P100_GPUS" + // "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS" + // "PREEMPTIBLE_NVIDIA_P4_GPUS" + // "PREEMPTIBLE_NVIDIA_P4_VWS_GPUS" + // "PREEMPTIBLE_NVIDIA_T4_GPUS" + // "PREEMPTIBLE_NVIDIA_T4_VWS_GPUS" + // "PREEMPTIBLE_NVIDIA_V100_GPUS" + // "PSC_ILB_CONSUMER_FORWARDING_RULES_PER_PRODUCER_NETWORK" + // "PUBLIC_ADVERTISED_PREFIXES" + // "PUBLIC_DELEGATED_PREFIXES" + // "REGIONAL_AUTOSCALERS" + // "REGIONAL_INSTANCE_GROUP_MANAGERS" + // "RESERVATIONS" + // "RESOURCE_POLICIES" + // "ROUTERS" + // "ROUTES" + // "SECURITY_POLICIES" + // "SECURITY_POLICY_CEVAL_RULES" + // "SECURITY_POLICY_RULES" + // "SNAPSHOTS" + // "SSD_TOTAL_GB" + // "SSL_CERTIFICATES" + // "STATIC_ADDRESSES" + // "STATIC_BYOIP_ADDRESSES" + // "SUBNETWORKS" + // "TARGET_HTTPS_PROXIES" + // "TARGET_HTTP_PROXIES" + // "TARGET_INSTANCES" + // "TARGET_POOLS" + // "TARGET_SSL_PROXIES" + // "TARGET_TCP_PROXIES" + // "TARGET_VPN_GATEWAYS" + // "URL_MAPS" + // "VPN_GATEWAYS" + // "VPN_TUNNELS" + // "XPN_SERVICE_PROJECTS" + Metric string `json:"metric,omitempty"` + + // Owner: [Output Only] Owning resource. This is the resource on which + // this quota is applied. + Owner string `json:"owner,omitempty"` + + // Usage: [Output Only] Current usage of this metric. + Usage float64 `json:"usage,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Limit") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Limit") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Quota) MarshalJSON() ([]byte, error) { + type NoMethod Quota + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *Quota) UnmarshalJSON(data []byte) error { + type NoMethod Quota + var s1 struct { + Limit gensupport.JSONFloat64 `json:"limit"` + Usage gensupport.JSONFloat64 `json:"usage"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Limit = float64(s1.Limit) + s.Usage = float64(s1.Usage) + return nil +} + +// Reference: Represents a reference to a resource. +type Reference struct { + // Kind: [Output Only] Type of the resource. Always compute#reference + // for references. + Kind string `json:"kind,omitempty"` + + // ReferenceType: A description of the reference type with no implied + // semantics. Possible values include: + // - MEMBER_OF + ReferenceType string `json:"referenceType,omitempty"` + + // Referrer: URL of the resource which refers to the target. + Referrer string `json:"referrer,omitempty"` + + // Target: URL of the resource to which this reference points. + Target string `json:"target,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Reference) MarshalJSON() ([]byte, error) { + type NoMethod Reference + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Region: Represents a Region resource. +// +// A region is a geographical area where a resource is located. For more +// information, read Regions and Zones. (== resource_for +// {$api_version}.regions ==) +type Region struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Deprecated -- [Output Only] The deprecation status associated with + // this region. 
+ Deprecated *DeprecationStatus `json:"deprecated,omitempty"` + + // Description: [Output Only] Textual description of the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#region for + // regions. + Kind string `json:"kind,omitempty"` + + // Name: [Output Only] Name of the resource. + Name string `json:"name,omitempty"` + + // Quotas: [Output Only] Quotas assigned to this region. + Quotas []*Quota `json:"quotas,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: [Output Only] Status of the region, either UP or DOWN. + // + // Possible values: + // "DOWN" + // "UP" + Status string `json:"status,omitempty"` + + // SupportsPzs: [Output Only] Reserved for future use. + SupportsPzs bool `json:"supportsPzs,omitempty"` + + // Zones: [Output Only] A list of zones available in this region, in the + // form of resource URLs. + Zones []string `json:"zones,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -30630,6 +32480,9 @@ type Reservation struct { // be a dash. Name string `json:"name,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. + SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` + // SelfLink: [Output Only] Server-defined fully-qualified URL for this // resource. SelfLink string `json:"selfLink,omitempty"` @@ -31447,6 +33300,10 @@ type ResourcePolicy struct { // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` + // InstanceSchedulePolicy: Resource policy for scheduling instance + // operations. + InstanceSchedulePolicy *ResourcePolicyInstanceSchedulePolicy `json:"instanceSchedulePolicy,omitempty"` + // Kind: [Output Only] Type of the resource. Always // compute#resource_policies for resource policies. Kind string `json:"kind,omitempty"` @@ -31463,6 +33320,10 @@ type ResourcePolicy struct { Region string `json:"region,omitempty"` + // ResourceStatus: [Output Only] The system status of the resource + // policy. + ResourceStatus *ResourcePolicyResourceStatus `json:"resourceStatus,omitempty"` + // SelfLink: [Output Only] Server-defined fully-qualified URL for this // resource. SelfLink string `json:"selfLink,omitempty"` @@ -31476,6 +33337,7 @@ type ResourcePolicy struct { // Possible values: // "CREATING" // "DELETING" + // "EXPIRED" // "INVALID" // "READY" Status string `json:"status,omitempty"` @@ -31792,6 +33654,83 @@ func (s *ResourcePolicyHourlyCycle) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ResourcePolicyInstanceSchedulePolicy: An InstanceSchedulePolicy +// specifies when and how frequent certain operations are performed on +// the instance. +type ResourcePolicyInstanceSchedulePolicy struct { + // ExpirationTime: The expiration time of the schedule. The timestamp is + // an RFC3339 string. + ExpirationTime string `json:"expirationTime,omitempty"` + + // StartTime: The start time of the schedule. The timestamp is an + // RFC3339 string. + StartTime string `json:"startTime,omitempty"` + + // TimeZone: Specifies the time zone to be used in interpreting + // Schedule.schedule. 
The value of this field must be a time zone name + // from the tz database: http://en.wikipedia.org/wiki/Tz_database. + TimeZone string `json:"timeZone,omitempty"` + + // VmStartSchedule: Specifies the schedule for starting instances. + VmStartSchedule *ResourcePolicyInstanceSchedulePolicySchedule `json:"vmStartSchedule,omitempty"` + + // VmStopSchedule: Specifies the schedule for stopping instances. + VmStopSchedule *ResourcePolicyInstanceSchedulePolicySchedule `json:"vmStopSchedule,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ExpirationTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ExpirationTime") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ResourcePolicyInstanceSchedulePolicy) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyInstanceSchedulePolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ResourcePolicyInstanceSchedulePolicySchedule: Schedule for an +// instance operation. +type ResourcePolicyInstanceSchedulePolicySchedule struct { + // Schedule: Specifies the frequency for the operation, using the + // unix-cron format. + Schedule string `json:"schedule,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Schedule") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Schedule") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ResourcePolicyInstanceSchedulePolicySchedule) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyInstanceSchedulePolicySchedule + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type ResourcePolicyList struct { Etag string `json:"etag,omitempty"` @@ -31951,6 +33890,75 @@ func (s *ResourcePolicyListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ResourcePolicyResourceStatus: Contains output only fields. Use this +// sub-message for all output fields set on ResourcePolicy. 
The internal +// structure of this "status" field should mimic the structure of +// ResourcePolicy proto specification. +type ResourcePolicyResourceStatus struct { + // InstanceSchedulePolicy: [Output Only] Specifies a set of output + // values reffering to the instance_schedule_policy system status. This + // field should have the same name as corresponding policy field. + InstanceSchedulePolicy *ResourcePolicyResourceStatusInstanceSchedulePolicyStatus `json:"instanceSchedulePolicy,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "InstanceSchedulePolicy") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InstanceSchedulePolicy") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ResourcePolicyResourceStatus) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyResourceStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ResourcePolicyResourceStatusInstanceSchedulePolicyStatus struct { + // LastRunStartTime: [Output Only] The last time the schedule + // successfully ran. The timestamp is an RFC3339 string. + LastRunStartTime string `json:"lastRunStartTime,omitempty"` + + // NextRunStartTime: [Output Only] The next time the schedule is planned + // to run. The actual time might be slightly different. The timestamp is + // an RFC3339 string. + NextRunStartTime string `json:"nextRunStartTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LastRunStartTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LastRunStartTime") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ResourcePolicyResourceStatusInstanceSchedulePolicyStatus) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyResourceStatusInstanceSchedulePolicyStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ResourcePolicySnapshotSchedulePolicy: A snapshot schedule policy // specifies when and how frequently snapshots are to be created for the // target disk. 
Also specifies how many and how long these scheduled @@ -32586,6 +34594,12 @@ type Router struct { // property when you create the resource. Description string `json:"description,omitempty"` + // EncryptedInterconnectRouter: Field to indicate if a router is + // dedicated to use with encrypted Interconnect Attachment + // (IPsec-encrypted Cloud Interconnect feature). + // Not currently available in all Interconnect locations. + EncryptedInterconnectRouter bool `json:"encryptedInterconnectRouter,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` @@ -32911,12 +34925,11 @@ type RouterBgpPeer struct { // in custom mode, which can take one of the following options: // - ALL_SUBNETS: Advertises all available subnets, including peer VPC // subnets. - // - ALL_VPC_SUBNETS: Advertises the router's own VPC subnets. - // - ALL_PEER_VPC_SUBNETS: Advertises peer subnets of the router's VPC - // network. Note that this field can only be populated if advertise_mode - // is CUSTOM and overrides the list defined for the router (in the "bgp" - // message). These groups are advertised in addition to any specified - // prefixes. Leave this field blank to advertise no custom groups. + // - ALL_VPC_SUBNETS: Advertises the router's own VPC subnets. Note that + // this field can only be populated if advertise_mode is CUSTOM and + // overrides the list defined for the router (in the "bgp" message). + // These groups are advertised in addition to any specified prefixes. + // Leave this field blank to advertise no custom groups. // // Possible values: // "ALL_SUBNETS" @@ -33910,7 +35923,52 @@ func (s *SSLHealthCheck) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Scheduling: Sets the scheduling options for an Instance. NextID: 13 +type ScalingScheduleStatus struct { + // LastStartTime: [Output Only] The last time the scaling schedule + // became active. Note: this is a timestamp when a schedule actually + // became active, not when it was planned to do so. The timestamp is in + // RFC3339 text format. + LastStartTime string `json:"lastStartTime,omitempty"` + + // NextStartTime: [Output Only] The next time the scaling schedule is to + // become active. Note: this is a timestamp when a schedule is planned + // to run, but the actual time might be slightly different. The + // timestamp is in RFC3339 text format. + NextStartTime string `json:"nextStartTime,omitempty"` + + // State: [Output Only] The current state of a scaling schedule. + // + // Possible values: + // "ACTIVE" + // "DISABLED" + // "OBSOLETE" + // "READY" + State string `json:"state,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LastStartTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LastStartTime") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ScalingScheduleStatus) MarshalJSON() ([]byte, error) { + type NoMethod ScalingScheduleStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Scheduling: Sets the scheduling options for an Instance. NextID: 20 type Scheduling struct { // AutomaticRestart: Specifies whether the instance should be // automatically restarted if it is terminated by Compute Engine (not @@ -34379,7 +36437,7 @@ func (s *SecurityPolicyReference) MarshalJSON() ([]byte, error) { // match conditions along with the action to be taken when traffic // matches this condition (allow or deny). type SecurityPolicyRule struct { - // Action: The Action to preform when the client connection triggers the + // Action: The Action to perform when the client connection triggers the // rule. Can currently be either "allow" or "deny()" where valid values // for status are 403, 404, and 502. Action string `json:"action,omitempty"` @@ -36442,9 +38500,9 @@ type Subnetwork struct { // subnetwork. Provide this property when you create the subnetwork. For // example, 10.0.0.0/8 or 100.64.0.0/10. Ranges must be unique and // non-overlapping within a network. Only IPv4 is supported. This field - // is set at resource creation time. This may be a RFC 1918 IP range, or - // a privately routed, non-RFC 1918 IP range, not belonging to Google. - // The range can be expanded after creation using expandIpCidrRange. + // is set at resource creation time. The range can be any range listed + // in the Valid ranges list. The range can be expanded after creation + // using expandIpCidrRange. IpCidrRange string `json:"ipCidrRange,omitempty"` // Ipv6CidrRange: [Output Only] The range of internal IPv6 addresses @@ -36933,7 +38991,7 @@ type SubnetworkLogConfig struct { // Metadata: Can only be specified if VPC flow logs for this subnetwork // is enabled. Configures whether all, none or a subset of metadata // fields should be added to the reported VPC flow logs. Default is - // INCLUDE_ALL_METADATA. + // EXCLUDE_ALL_METADATA. // // Possible values: // "CUSTOM_METADATA" @@ -36990,8 +39048,8 @@ type SubnetworkSecondaryRange struct { // secondary range. Provide this property when you create the // subnetwork. Ranges must be unique and non-overlapping with all // primary and secondary IP ranges within a network. Only IPv4 is - // supported. This may be a RFC 1918 IP range, or a privately, non-RFC - // 1918 IP range, not belonging to Google. + // supported. The range can be any range listed in the Valid ranges + // list. IpCidrRange string `json:"ipCidrRange,omitempty"` // RangeName: The name associated with this subnetwork secondary range, @@ -38280,6 +40338,15 @@ type TargetHttpsProxy struct { // property when you create the resource. Description string `json:"description,omitempty"` + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a TargetHttpsProxy. An + // up-to-date fingerprint must be provided in order to patch the + // TargetHttpsProxy; otherwise, the request will fail with error 412 + // conditionNotMet. To see the latest fingerprint, make a get() request + // to retrieve the TargetHttpsProxy. 
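The Fingerprint field described above enables optimistic concurrency: read the proxy, send the returned fingerprint back with the modification, and retry when the server answers 412 conditionNotMet. A hedged sketch of that read-modify-write step (without the retry loop) follows; the project and proxy names are placeholders, QuicOverride is used only as an example of a mutable field, and the TargetHttpsProxies.Patch method is assumed to be present in this client revision.

// Read-modify-write on a TargetHttpsProxy using its fingerprint.
func setQuicOverride(svc *compute.Service) error {
    proxy, err := svc.TargetHttpsProxies.Get("my-project", "my-proxy").Do()
    if err != nil {
        return err
    }
    // A stale fingerprint makes the patch fail with 412 conditionNotMet.
    _, err = svc.TargetHttpsProxies.Patch("my-project", "my-proxy", &compute.TargetHttpsProxy{
        Fingerprint:  proxy.Fingerprint,
        QuicOverride: "ENABLE", // illustrative field; any patchable field works the same way
    }).Do()
    return err
}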
+ Fingerprint string `json:"fingerprint,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` @@ -39353,6 +41420,7 @@ type TargetPool struct { // // Possible values: // "CLIENT_IP" + // "CLIENT_IP_NO_DESTINATION" // "CLIENT_IP_PORT_PROTO" // "CLIENT_IP_PROTO" // "GENERATED_COOKIE" @@ -40464,6 +42532,20 @@ type TargetTcpProxy struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` + // ProxyBind: This field only applies when the forwarding rule that + // references this target proxy has a loadBalancingScheme set to + // INTERNAL_SELF_MANAGED. + // + // When this field is set to true, Envoy proxies set up inbound traffic + // interception and bind to the IP address and port specified in the + // forwarding rule. This is generally useful when using Traffic Director + // to configure Envoy as a gateway or middle proxy (in other words, not + // a sidecar proxy). The Envoy proxy listens for inbound requests and + // handles requests when it receives them. + // + // The default is false. + ProxyBind bool `json:"proxyBind,omitempty"` + // ProxyHeader: Specifies the type of proxy header to append before // sending data to the backend, either NONE or PROXY_V1. The default is // NONE. @@ -43297,6 +45379,14 @@ type VpnGatewayVpnGatewayInterface struct { // Id: The numeric ID of this VPN gateway interface. Id int64 `json:"id,omitempty"` + // InterconnectAttachment: URL of the interconnect attachment resource. + // When the value of this field is present, the VPN Gateway will be used + // for IPsec-encrypted Cloud Interconnect; all Egress or Ingress traffic + // for this VPN Gateway interface will go through the specified + // interconnect attachment resource. + // Not currently available in all Interconnect locations. + InterconnectAttachment string `json:"interconnectAttachment,omitempty"` + // IpAddress: [Output Only] The external IP address for this VPN gateway // interface. IpAddress string `json:"ipAddress,omitempty"` @@ -43601,7 +45691,7 @@ type VpnTunnel struct { // be used. // - NO_INCOMING_PACKETS: No incoming packets from peer. // - REJECTED: Tunnel configuration was rejected, can be result of being - // blacklisted. + // denied access. // - ALLOCATING_RESOURCES: Cloud VPN is in the process of allocating all // required resources. // - STOPPED: Tunnel is stopped due to its Forwarding Rules being @@ -44762,6 +46852,8 @@ type AcceleratorTypesAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of accelerator types. +// +// - project: Project ID for this request. func (r *AcceleratorTypesService) AggregatedList(project string) *AcceleratorTypesAggregatedListCall { c := &AcceleratorTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -44849,7 +46941,7 @@ func (c *AcceleratorTypesAggregatedListCall) PageToken(pageToken string) *Accele // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *AcceleratorTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AcceleratorTypesAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -44892,7 +46984,7 @@ func (c *AcceleratorTypesAggregatedListCall) Header() http.Header { func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -44997,7 +47089,7 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -45050,6 +47142,10 @@ type AcceleratorTypesGetCall struct { } // Get: Returns the specified accelerator type. +// +// - acceleratorType: Name of the accelerator type to return. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *AcceleratorTypesService) Get(project string, zone string, acceleratorType string) *AcceleratorTypesGetCall { c := &AcceleratorTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -45095,7 +47191,7 @@ func (c *AcceleratorTypesGetCall) Header() http.Header { func (c *AcceleratorTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -45217,6 +47313,9 @@ type AcceleratorTypesListCall struct { // List: Retrieves a list of accelerator types that are available to the // specified project. +// +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *AcceleratorTypesService) List(project string, zone string) *AcceleratorTypesListCall { c := &AcceleratorTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -45292,7 +47391,7 @@ func (c *AcceleratorTypesListCall) PageToken(pageToken string) *AcceleratorTypes // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
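ReturnPartialSuccess and the page-token plumbing above are easiest to drive through the generated Pages helper, which follows nextPageToken internally. A small sketch over the aggregated accelerator-type listing; the project ID is a placeholder and svc is the *compute.Service from the earlier sketch.

// Page through accelerator types in every zone, opting in to partial
// results when some scopes are unreachable.
func listAcceleratorTypes(ctx context.Context, svc *compute.Service) error {
    call := svc.AcceleratorTypes.AggregatedList("my-project").ReturnPartialSuccess(true)
    return call.Pages(ctx, func(page *compute.AcceleratorTypeAggregatedList) error {
        for zone, scoped := range page.Items {
            for _, at := range scoped.AcceleratorTypes {
                fmt.Println(zone, at.Name)
            }
        }
        return nil
    })
}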
func (c *AcceleratorTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AcceleratorTypesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -45335,7 +47434,7 @@ func (c *AcceleratorTypesListCall) Header() http.Header { func (c *AcceleratorTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -45437,7 +47536,7 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // }, @@ -45495,6 +47594,8 @@ type AddressesAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of addresses. +// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/aggregatedList func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedListCall { c := &AddressesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -45583,7 +47684,7 @@ func (c *AddressesAggregatedListCall) PageToken(pageToken string) *AddressesAggr // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *AddressesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AddressesAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -45626,7 +47727,7 @@ func (c *AddressesAggregatedListCall) Header() http.Header { func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -45731,7 +47832,7 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -45783,6 +47884,10 @@ type AddressesDeleteCall struct { } // Delete: Deletes the specified address resource. +// +// - address: Name of the address resource to delete. +// - project: Project ID for this request. +// - region: Name of the region for this request. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/delete func (r *AddressesService) Delete(project string, region string, address string) *AddressesDeleteCall { c := &AddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -45838,7 +47943,7 @@ func (c *AddressesDeleteCall) Header() http.Header { func (c *AddressesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -45961,6 +48066,10 @@ type AddressesGetCall struct { } // Get: Returns the specified address resource. +// +// - address: Name of the address resource to return. +// - project: Project ID for this request. +// - region: Name of the region for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/get func (r *AddressesService) Get(project string, region string, address string) *AddressesGetCall { c := &AddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -46007,7 +48116,7 @@ func (c *AddressesGetCall) Header() http.Header { func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -46129,6 +48238,9 @@ type AddressesInsertCall struct { // Insert: Creates an address resource in the specified project by using // the data included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/insert func (r *AddressesService) Insert(project string, region string, address *Address) *AddressesInsertCall { c := &AddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -46184,7 +48296,7 @@ func (c *AddressesInsertCall) Header() http.Header { func (c *AddressesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -46306,6 +48418,9 @@ type AddressesListCall struct { // List: Retrieves a list of addresses contained within the specified // region. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/list func (r *AddressesService) List(project string, region string) *AddressesListCall { c := &AddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -46382,7 +48497,7 @@ func (c *AddressesListCall) PageToken(pageToken string) *AddressesListCall { // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *AddressesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AddressesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -46425,7 +48540,7 @@ func (c *AddressesListCall) Header() http.Header { func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -46534,7 +48649,7 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -46585,6 +48700,8 @@ type AutoscalersAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of autoscalers. +// +// - project: Project ID for this request. func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregatedListCall { c := &AutoscalersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -46672,7 +48789,7 @@ func (c *AutoscalersAggregatedListCall) PageToken(pageToken string) *Autoscalers // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *AutoscalersAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AutoscalersAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -46715,7 +48832,7 @@ func (c *AutoscalersAggregatedListCall) Header() http.Header { func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -46820,7 +48937,7 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -46872,6 +48989,10 @@ type AutoscalersDeleteCall struct { } // Delete: Deletes the specified autoscaler. +// +// - autoscaler: Name of the autoscaler to delete. +// - project: Project ID for this request. +// - zone: Name of the zone for this request. 
func (r *AutoscalersService) Delete(project string, zone string, autoscaler string) *AutoscalersDeleteCall { c := &AutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -46926,7 +49047,7 @@ func (c *AutoscalersDeleteCall) Header() http.Header { func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -47050,6 +49171,10 @@ type AutoscalersGetCall struct { // Get: Returns the specified autoscaler resource. Gets a list of // available autoscalers by making a list() request. +// +// - autoscaler: Name of the autoscaler to return. +// - project: Project ID for this request. +// - zone: Name of the zone for this request. func (r *AutoscalersService) Get(project string, zone string, autoscaler string) *AutoscalersGetCall { c := &AutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -47095,7 +49220,7 @@ func (c *AutoscalersGetCall) Header() http.Header { func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -47217,6 +49342,9 @@ type AutoscalersInsertCall struct { // Insert: Creates an autoscaler in the specified project using the data // included in the request. +// +// - project: Project ID for this request. +// - zone: Name of the zone for this request. func (r *AutoscalersService) Insert(project string, zone string, autoscaler *Autoscaler) *AutoscalersInsertCall { c := &AutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -47271,7 +49399,7 @@ func (c *AutoscalersInsertCall) Header() http.Header { func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -47393,6 +49521,9 @@ type AutoscalersListCall struct { // List: Retrieves a list of autoscalers contained within the specified // zone. +// +// - project: Project ID for this request. +// - zone: Name of the zone for this request. func (r *AutoscalersService) List(project string, zone string) *AutoscalersListCall { c := &AutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -47468,7 +49599,7 @@ func (c *AutoscalersListCall) PageToken(pageToken string) *AutoscalersListCall { // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
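The Autoscalers.Insert call documented above takes a full Autoscaler body and returns a zonal Operation. A sketch with placeholder names and an assumed target managed instance group URL; the policy values are illustrative only, and svc is the client from the earlier sketch.

// Create a zonal autoscaler attached to an existing managed instance group.
func createAutoscaler(svc *compute.Service) error {
    op, err := svc.Autoscalers.Insert("my-project", "us-east1-b", &compute.Autoscaler{
        Name:   "example-autoscaler",
        Target: "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-east1-b/instanceGroupManagers/example-mig",
        AutoscalingPolicy: &compute.AutoscalingPolicy{
            MinNumReplicas: 1,
            MaxNumReplicas: 5,
            CpuUtilization: &compute.AutoscalingPolicyCpuUtilization{
                UtilizationTarget: 0.6,
            },
        },
    }).Do()
    if err != nil {
        return err
    }
    fmt.Println("operation:", op.Name) // zonal Operation; poll until DONE if needed
    return nil
}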
func (c *AutoscalersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AutoscalersListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -47511,7 +49642,7 @@ func (c *AutoscalersListCall) Header() http.Header { func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -47613,7 +49744,7 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // }, @@ -47674,6 +49805,9 @@ type AutoscalersPatchCall struct { // Patch: Updates an autoscaler in the specified project using the data // included in the request. This method supports PATCH semantics and // uses the JSON merge patch format and processing rules. +// +// - project: Project ID for this request. +// - zone: Name of the zone for this request. func (r *AutoscalersService) Patch(project string, zone string, autoscaler *Autoscaler) *AutoscalersPatchCall { c := &AutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -47735,7 +49869,7 @@ func (c *AutoscalersPatchCall) Header() http.Header { func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -47863,6 +49997,9 @@ type AutoscalersUpdateCall struct { // Update: Updates an autoscaler in the specified project using the data // included in the request. +// +// - project: Project ID for this request. +// - zone: Name of the zone for this request. func (r *AutoscalersService) Update(project string, zone string, autoscaler *Autoscaler) *AutoscalersUpdateCall { c := &AutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -47924,7 +50061,7 @@ func (c *AutoscalersUpdateCall) Header() http.Header { func (c *AutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -48052,6 +50189,10 @@ type BackendBucketsAddSignedUrlKeyCall struct { // AddSignedUrlKey: Adds a key for validating requests with signed URLs // for this backend bucket. +// +// - backendBucket: Name of the BackendBucket resource to which the +// Signed URL Key should be added. The name should conform to RFC1035. +// - project: Project ID for this request. 
func (r *BackendBucketsService) AddSignedUrlKey(project string, backendBucket string, signedurlkey *SignedUrlKey) *BackendBucketsAddSignedUrlKeyCall { c := &BackendBucketsAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -48106,7 +50247,7 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Header() http.Header { func (c *BackendBucketsAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -48225,6 +50366,9 @@ type BackendBucketsDeleteCall struct { } // Delete: Deletes the specified BackendBucket resource. +// +// - backendBucket: Name of the BackendBucket resource to delete. +// - project: Project ID for this request. func (r *BackendBucketsService) Delete(project string, backendBucket string) *BackendBucketsDeleteCall { c := &BackendBucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -48278,7 +50422,7 @@ func (c *BackendBucketsDeleteCall) Header() http.Header { func (c *BackendBucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -48391,6 +50535,11 @@ type BackendBucketsDeleteSignedUrlKeyCall struct { // DeleteSignedUrlKey: Deletes a key for validating requests with signed // URLs for this backend bucket. +// +// - backendBucket: Name of the BackendBucket resource to which the +// Signed URL Key should be added. The name should conform to RFC1035. +// - keyName: The name of the Signed URL Key to delete. +// - project: Project ID for this request. func (r *BackendBucketsService) DeleteSignedUrlKey(project string, backendBucket string, keyName string) *BackendBucketsDeleteSignedUrlKeyCall { c := &BackendBucketsDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -48445,7 +50594,7 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Header() http.Header { func (c *BackendBucketsDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -48565,6 +50714,9 @@ type BackendBucketsGetCall struct { // Get: Returns the specified BackendBucket resource. Gets a list of // available backend buckets by making a list() request. +// +// - backendBucket: Name of the BackendBucket resource to return. +// - project: Project ID for this request. 
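AddSignedUrlKey above attaches a Cloud CDN signed-URL key to a backend bucket and returns a global Operation. A sketch with placeholder names; the key value shown is not a real key and would need to be a freshly generated, base64url-encoded 128-bit secret in practice. svc is the client from the earlier sketch.

// Add a signed-URL key to a backend bucket.
func addSignedURLKey(svc *compute.Service) error {
    op, err := svc.BackendBuckets.AddSignedUrlKey("my-project", "my-backend-bucket", &compute.SignedUrlKey{
        KeyName:  "my-key",
        KeyValue: "EXAMPLE-NOT-A-REAL-KEY==", // placeholder; supply a real random key
    }).Do()
    if err != nil {
        return err
    }
    _ = op // global Operation; poll until DONE if needed
    return nil
}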
func (r *BackendBucketsService) Get(project string, backendBucket string) *BackendBucketsGetCall { c := &BackendBucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -48609,7 +50761,7 @@ func (c *BackendBucketsGetCall) Header() http.Header { func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -48721,6 +50873,8 @@ type BackendBucketsInsertCall struct { // Insert: Creates a BackendBucket resource in the specified project // using the data included in the request. +// +// - project: Project ID for this request. func (r *BackendBucketsService) Insert(project string, backendbucket *BackendBucket) *BackendBucketsInsertCall { c := &BackendBucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -48774,7 +50928,7 @@ func (c *BackendBucketsInsertCall) Header() http.Header { func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -48886,6 +51040,8 @@ type BackendBucketsListCall struct { // List: Retrieves the list of BackendBucket resources available to the // specified project. +// +// - project: Project ID for this request. func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { c := &BackendBucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -48960,7 +51116,7 @@ func (c *BackendBucketsListCall) PageToken(pageToken string) *BackendBucketsList // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *BackendBucketsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *BackendBucketsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -49003,7 +51159,7 @@ func (c *BackendBucketsListCall) Header() http.Header { func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -49103,7 +51259,7 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -49157,6 +51313,9 @@ type BackendBucketsPatchCall struct { // Patch: Updates the specified BackendBucket resource with the data // included in the request. 
This method supports PATCH semantics and // uses the JSON merge patch format and processing rules. +// +// - backendBucket: Name of the BackendBucket resource to patch. +// - project: Project ID for this request. func (r *BackendBucketsService) Patch(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsPatchCall { c := &BackendBucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -49211,7 +51370,7 @@ func (c *BackendBucketsPatchCall) Header() http.Header { func (c *BackendBucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -49333,6 +51492,9 @@ type BackendBucketsUpdateCall struct { // Update: Updates the specified BackendBucket resource with the data // included in the request. +// +// - backendBucket: Name of the BackendBucket resource to update. +// - project: Project ID for this request. func (r *BackendBucketsService) Update(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsUpdateCall { c := &BackendBucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -49387,7 +51549,7 @@ func (c *BackendBucketsUpdateCall) Header() http.Header { func (c *BackendBucketsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -49509,6 +51671,10 @@ type BackendServicesAddSignedUrlKeyCall struct { // AddSignedUrlKey: Adds a key for validating requests with signed URLs // for this backend service. +// +// - backendService: Name of the BackendService resource to which the +// Signed URL Key should be added. The name should conform to RFC1035. +// - project: Project ID for this request. func (r *BackendServicesService) AddSignedUrlKey(project string, backendService string, signedurlkey *SignedUrlKey) *BackendServicesAddSignedUrlKeyCall { c := &BackendServicesAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -49563,7 +51729,7 @@ func (c *BackendServicesAddSignedUrlKeyCall) Header() http.Header { func (c *BackendServicesAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -49683,6 +51849,8 @@ type BackendServicesAggregatedListCall struct { // AggregatedList: Retrieves the list of all BackendService resources, // regional and global, available to the specified project. +// +// - project: Name of the project scoping this request. 
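Because the Patch variants use JSON merge-patch and the client omits empty Go values from request bodies by default, clearing a field requires listing it in ForceSendFields (or in NullFields for an explicit JSON null), as the struct comments earlier describe. A sketch that clears a backend bucket description; names are placeholders and svc is the client from the earlier sketch.

// Clear a backend bucket's description via Patch. Without the
// ForceSendFields entry the empty string would simply be dropped
// from the request body and nothing would change.
func clearDescription(svc *compute.Service) error {
    patch := &compute.BackendBucket{
        Description:     "",
        ForceSendFields: []string{"Description"},
    }
    _, err := svc.BackendBuckets.Patch("my-project", "my-backend-bucket", patch).Do()
    return err
}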
func (r *BackendServicesService) AggregatedList(project string) *BackendServicesAggregatedListCall { c := &BackendServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -49770,7 +51938,7 @@ func (c *BackendServicesAggregatedListCall) PageToken(pageToken string) *Backend // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *BackendServicesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *BackendServicesAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -49813,7 +51981,7 @@ func (c *BackendServicesAggregatedListCall) Header() http.Header { func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -49918,7 +52086,7 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -49969,6 +52137,9 @@ type BackendServicesDeleteCall struct { } // Delete: Deletes the specified BackendService resource. +// +// - backendService: Name of the BackendService resource to delete. +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/delete func (r *BackendServicesService) Delete(project string, backendService string) *BackendServicesDeleteCall { c := &BackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -50023,7 +52194,7 @@ func (c *BackendServicesDeleteCall) Header() http.Header { func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -50136,6 +52307,11 @@ type BackendServicesDeleteSignedUrlKeyCall struct { // DeleteSignedUrlKey: Deletes a key for validating requests with signed // URLs for this backend service. +// +// - backendService: Name of the BackendService resource to which the +// Signed URL Key should be added. The name should conform to RFC1035. +// - keyName: The name of the Signed URL Key to delete. +// - project: Project ID for this request. 
func (r *BackendServicesService) DeleteSignedUrlKey(project string, backendService string, keyName string) *BackendServicesDeleteSignedUrlKeyCall { c := &BackendServicesDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -50190,7 +52366,7 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) Header() http.Header { func (c *BackendServicesDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -50310,6 +52486,9 @@ type BackendServicesGetCall struct { // Get: Returns the specified BackendService resource. Gets a list of // available backend services. +// +// - backendService: Name of the BackendService resource to return. +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/get func (r *BackendServicesService) Get(project string, backendService string) *BackendServicesGetCall { c := &BackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -50355,7 +52534,7 @@ func (c *BackendServicesGetCall) Header() http.Header { func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -50472,6 +52651,10 @@ type BackendServicesGetHealthCall struct { // Example request body: // // { "group": "/zones/us-east1-b/instanceGroups/lb-backend-example" } +// +// - backendService: Name of the BackendService resource to which the +// queried instance belongs. +// - project: . // For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/getHealth func (r *BackendServicesService) GetHealth(project string, backendService string, resourcegroupreference *ResourceGroupReference) *BackendServicesGetHealthCall { c := &BackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -50508,7 +52691,7 @@ func (c *BackendServicesGetHealthCall) Header() http.Header { func (c *BackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -50625,6 +52808,8 @@ type BackendServicesInsertCall struct { // Insert: Creates a BackendService resource in the specified project // using the data included in the request. For more information, see // Backend services overview. +// +// - project: Project ID for this request. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/insert func (r *BackendServicesService) Insert(project string, backendservice *BackendService) *BackendServicesInsertCall { c := &BackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -50679,7 +52864,7 @@ func (c *BackendServicesInsertCall) Header() http.Header { func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -50791,6 +52976,8 @@ type BackendServicesListCall struct { // List: Retrieves the list of BackendService resources available to the // specified project. +// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/list func (r *BackendServicesService) List(project string) *BackendServicesListCall { c := &BackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -50866,7 +53053,7 @@ func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesLi // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *BackendServicesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *BackendServicesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -50909,7 +53096,7 @@ func (c *BackendServicesListCall) Header() http.Header { func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -51009,7 +53196,7 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -51064,6 +53251,9 @@ type BackendServicesPatchCall struct { // included in the request. For more information, see Backend services // overview. This method supports PATCH semantics and uses the JSON // merge patch format and processing rules. +// +// - backendService: Name of the BackendService resource to patch. +// - project: Project ID for this request. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/patch func (r *BackendServicesService) Patch(project string, backendService string, backendservice *BackendService) *BackendServicesPatchCall { c := &BackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -51119,7 +53309,7 @@ func (c *BackendServicesPatchCall) Header() http.Header { func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -51239,8 +53429,13 @@ type BackendServicesSetSecurityPolicyCall struct { header_ http.Header } -// SetSecurityPolicy: Sets the security policy for the specified backend -// service. +// SetSecurityPolicy: Sets the Google Cloud Armor security policy for +// the specified backend service. For more information, see Google Cloud +// Armor Overview +// +// - backendService: Name of the BackendService resource to which the +// security policy should be set. The name should conform to RFC1035. +// - project: Project ID for this request. func (r *BackendServicesService) SetSecurityPolicy(project string, backendService string, securitypolicyreference *SecurityPolicyReference) *BackendServicesSetSecurityPolicyCall { c := &BackendServicesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -51295,7 +53490,7 @@ func (c *BackendServicesSetSecurityPolicyCall) Header() http.Header { func (c *BackendServicesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -51360,7 +53555,7 @@ func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets the security policy for the specified backend service.", + // "description": "Sets the Google Cloud Armor security policy for the specified backend service. For more information, see Google Cloud Armor Overview", // "httpMethod": "POST", // "id": "compute.backendServices.setSecurityPolicy", // "parameterOrder": [ @@ -51417,6 +53612,9 @@ type BackendServicesUpdateCall struct { // Update: Updates the specified BackendService resource with the data // included in the request. For more information, see Backend services // overview. +// +// - backendService: Name of the BackendService resource to update. +// - project: Project ID for this request. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/update func (r *BackendServicesService) Update(project string, backendService string, backendservice *BackendService) *BackendServicesUpdateCall { c := &BackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -51472,7 +53670,7 @@ func (c *BackendServicesUpdateCall) Header() http.Header { func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -51592,6 +53790,8 @@ type DiskTypesAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of disk types. +// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/aggregatedList func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedListCall { c := &DiskTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -51680,7 +53880,7 @@ func (c *DiskTypesAggregatedListCall) PageToken(pageToken string) *DiskTypesAggr // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *DiskTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *DiskTypesAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -51723,7 +53923,7 @@ func (c *DiskTypesAggregatedListCall) Header() http.Header { func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -51828,7 +54028,7 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -51882,6 +54082,10 @@ type DiskTypesGetCall struct { // Get: Returns the specified disk type. Gets a list of available disk // types by making a list() request. +// +// - diskType: Name of the disk type to return. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/get func (r *DiskTypesService) Get(project string, zone string, diskType string) *DiskTypesGetCall { c := &DiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -51928,7 +54132,7 @@ func (c *DiskTypesGetCall) Header() http.Header { func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -52050,6 +54254,9 @@ type DiskTypesListCall struct { // List: Retrieves a list of disk types available to the specified // project. +// +// - project: Project ID for this request. +// - zone: The name of the zone for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/list func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall { c := &DiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -52126,7 +54333,7 @@ func (c *DiskTypesListCall) PageToken(pageToken string) *DiskTypesListCall { // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *DiskTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *DiskTypesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -52169,7 +54376,7 @@ func (c *DiskTypesListCall) Header() http.Header { func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -52271,7 +54478,7 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // }, @@ -52333,6 +54540,10 @@ type DisksAddResourcePoliciesCall struct { // AddResourcePolicies: Adds existing resource policies to a disk. You // can only add one policy which will be applied to this disk for // scheduling snapshot creation. +// +// - disk: The disk name for this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
func (r *DisksService) AddResourcePolicies(project string, zone string, disk string, disksaddresourcepoliciesrequest *DisksAddResourcePoliciesRequest) *DisksAddResourcePoliciesCall { c := &DisksAddResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -52388,7 +54599,7 @@ func (c *DisksAddResourcePoliciesCall) Header() http.Header { func (c *DisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -52517,6 +54728,8 @@ type DisksAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of persistent disks. +// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/disks/aggregatedList func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { c := &DisksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -52605,7 +54818,7 @@ func (c *DisksAggregatedListCall) PageToken(pageToken string) *DisksAggregatedLi // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *DisksAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *DisksAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -52648,7 +54861,7 @@ func (c *DisksAggregatedListCall) Header() http.Header { func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -52753,7 +54966,7 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -52806,6 +55019,10 @@ type DisksCreateSnapshotCall struct { } // CreateSnapshot: Creates a snapshot of a specified persistent disk. +// +// - disk: Name of the persistent disk to snapshot. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/createSnapshot func (r *DisksService) CreateSnapshot(project string, zone string, disk string, snapshot *Snapshot) *DisksCreateSnapshotCall { c := &DisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -52871,7 +55088,7 @@ func (c *DisksCreateSnapshotCall) Header() http.Header { func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -53009,6 +55226,10 @@ type DisksDeleteCall struct { // removes its data permanently and is irreversible. However, deleting a // disk does not delete any snapshots previously made from the disk. You // must separately delete snapshots. +// +// - disk: Name of the persistent disk to delete. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/disks/delete func (r *DisksService) Delete(project string, zone string, disk string) *DisksDeleteCall { c := &DisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -53064,7 +55285,7 @@ func (c *DisksDeleteCall) Header() http.Header { func (c *DisksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -53187,6 +55408,10 @@ type DisksGetCall struct { // Get: Returns a specified persistent disk. Gets a list of available // persistent disks by making a list() request. +// +// - disk: Name of the persistent disk to return. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/disks/get func (r *DisksService) Get(project string, zone string, disk string) *DisksGetCall { c := &DisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -53233,7 +55458,7 @@ func (c *DisksGetCall) Header() http.Header { func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -53356,6 +55581,10 @@ type DisksGetIamPolicyCall struct { // GetIamPolicy: Gets the access control policy for a resource. May be // empty if no such policy or resource exists. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +// - zone: The name of the zone for this request. 
func (r *DisksService) GetIamPolicy(project string, zone string, resource string) *DisksGetIamPolicyCall { c := &DisksGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -53408,7 +55637,7 @@ func (c *DisksGetIamPolicyCall) Header() http.Header { func (c *DisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -53540,6 +55769,9 @@ type DisksInsertCall struct { // GB data disk by omitting all properties. You can also create a disk // that is larger than the default size by specifying the sizeGb // property. +// +// - project: Project ID for this request. +// - zone: The name of the zone for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/disks/insert func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksInsertCall { c := &DisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -53602,7 +55834,7 @@ func (c *DisksInsertCall) Header() http.Header { func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -53729,6 +55961,9 @@ type DisksListCall struct { // List: Retrieves a list of persistent disks contained within the // specified zone. +// +// - project: Project ID for this request. +// - zone: The name of the zone for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/disks/list func (r *DisksService) List(project string, zone string) *DisksListCall { c := &DisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -53805,7 +56040,7 @@ func (c *DisksListCall) PageToken(pageToken string) *DisksListCall { // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *DisksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *DisksListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -53848,7 +56083,7 @@ func (c *DisksListCall) Header() http.Header { func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -53950,7 +56185,7 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" // }, @@ -54010,6 +56245,10 @@ type DisksRemoveResourcePoliciesCall struct { } // RemoveResourcePolicies: Removes resource policies from a disk. +// +// - disk: The disk name for this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *DisksService) RemoveResourcePolicies(project string, zone string, disk string, disksremoveresourcepoliciesrequest *DisksRemoveResourcePoliciesRequest) *DisksRemoveResourcePoliciesCall { c := &DisksRemoveResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -54065,7 +56304,7 @@ func (c *DisksRemoveResourcePoliciesCall) Header() http.Header { func (c *DisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -54197,6 +56436,10 @@ type DisksResizeCall struct { // Resize: Resizes the specified persistent disk. You can only increase // the size of the disk. +// +// - disk: The name of the persistent disk. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *DisksService) Resize(project string, zone string, disk string, disksresizerequest *DisksResizeRequest) *DisksResizeCall { c := &DisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -54252,7 +56495,7 @@ func (c *DisksResizeCall) Header() http.Header { func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -54384,6 +56627,10 @@ type DisksSetIamPolicyCall struct { // SetIamPolicy: Sets the access control policy on the specified // resource. Replaces any existing policy. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +// - zone: The name of the zone for this request. func (r *DisksService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *DisksSetIamPolicyCall { c := &DisksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -54420,7 +56667,7 @@ func (c *DisksSetIamPolicyCall) Header() http.Header { func (c *DisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -54547,6 +56794,10 @@ type DisksSetLabelsCall struct { // SetLabels: Sets the labels on a disk. To learn more about labels, // read the Labeling Resources documentation. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +// - zone: The name of the zone for this request. 
func (r *DisksService) SetLabels(project string, zone string, resource string, zonesetlabelsrequest *ZoneSetLabelsRequest) *DisksSetLabelsCall { c := &DisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -54602,7 +56853,7 @@ func (c *DisksSetLabelsCall) Header() http.Header { func (c *DisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -54734,6 +56985,10 @@ type DisksTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +// - zone: The name of the zone for this request. func (r *DisksService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *DisksTestIamPermissionsCall { c := &DisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -54770,7 +57025,7 @@ func (c *DisksTestIamPermissionsCall) Header() http.Header { func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -54895,6 +57150,9 @@ type ExternalVpnGatewaysDeleteCall struct { } // Delete: Deletes the specified externalVpnGateway. +// +// - externalVpnGateway: Name of the externalVpnGateways to delete. +// - project: Project ID for this request. func (r *ExternalVpnGatewaysService) Delete(project string, externalVpnGateway string) *ExternalVpnGatewaysDeleteCall { c := &ExternalVpnGatewaysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -54948,7 +57206,7 @@ func (c *ExternalVpnGatewaysDeleteCall) Header() http.Header { func (c *ExternalVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -55062,6 +57320,9 @@ type ExternalVpnGatewaysGetCall struct { // Get: Returns the specified externalVpnGateway. Get a list of // available externalVpnGateways by making a list() request. +// +// - externalVpnGateway: Name of the externalVpnGateway to return. +// - project: Project ID for this request. 
func (r *ExternalVpnGatewaysService) Get(project string, externalVpnGateway string) *ExternalVpnGatewaysGetCall { c := &ExternalVpnGatewaysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -55106,7 +57367,7 @@ func (c *ExternalVpnGatewaysGetCall) Header() http.Header { func (c *ExternalVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -55218,6 +57479,8 @@ type ExternalVpnGatewaysInsertCall struct { // Insert: Creates a ExternalVpnGateway in the specified project using // the data included in the request. +// +// - project: Project ID for this request. func (r *ExternalVpnGatewaysService) Insert(project string, externalvpngateway *ExternalVpnGateway) *ExternalVpnGatewaysInsertCall { c := &ExternalVpnGatewaysInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -55271,7 +57534,7 @@ func (c *ExternalVpnGatewaysInsertCall) Header() http.Header { func (c *ExternalVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -55383,6 +57646,8 @@ type ExternalVpnGatewaysListCall struct { // List: Retrieves the list of ExternalVpnGateway available to the // specified project. +// +// - project: Project ID for this request. func (r *ExternalVpnGatewaysService) List(project string) *ExternalVpnGatewaysListCall { c := &ExternalVpnGatewaysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -55457,7 +57722,7 @@ func (c *ExternalVpnGatewaysListCall) PageToken(pageToken string) *ExternalVpnGa // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *ExternalVpnGatewaysListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ExternalVpnGatewaysListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -55500,7 +57765,7 @@ func (c *ExternalVpnGatewaysListCall) Header() http.Header { func (c *ExternalVpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -55600,7 +57865,7 @@ func (c *ExternalVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*Externa // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" // } @@ -55653,6 +57918,9 @@ type ExternalVpnGatewaysSetLabelsCall struct { // SetLabels: Sets the labels on an ExternalVpnGateway. To learn more // about labels, read the Labeling Resources documentation. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. func (r *ExternalVpnGatewaysService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *ExternalVpnGatewaysSetLabelsCall { c := &ExternalVpnGatewaysSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -55688,7 +57956,7 @@ func (c *ExternalVpnGatewaysSetLabelsCall) Header() http.Header { func (c *ExternalVpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -55805,6 +58073,9 @@ type ExternalVpnGatewaysTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. func (r *ExternalVpnGatewaysService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ExternalVpnGatewaysTestIamPermissionsCall { c := &ExternalVpnGatewaysTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -55840,7 +58111,7 @@ func (c *ExternalVpnGatewaysTestIamPermissionsCall) Header() http.Header { func (c *ExternalVpnGatewaysTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -55957,6 +58228,8 @@ type FirewallPoliciesAddAssociationCall struct { // AddAssociation: Inserts an association for the specified firewall // policy. +// +// - firewallPolicy: Name of the firewall policy to update. func (r *FirewallPoliciesService) AddAssociation(firewallPolicy string, firewallpolicyassociation *FirewallPolicyAssociation) *FirewallPoliciesAddAssociationCall { c := &FirewallPoliciesAddAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.firewallPolicy = firewallPolicy @@ -56020,7 +58293,7 @@ func (c *FirewallPoliciesAddAssociationCall) Header() http.Header { func (c *FirewallPoliciesAddAssociationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -56136,6 +58409,8 @@ type FirewallPoliciesAddRuleCall struct { } // AddRule: Inserts a rule into a firewall policy. +// +// - firewallPolicy: Name of the firewall policy to update. 
func (r *FirewallPoliciesService) AddRule(firewallPolicy string, firewallpolicyrule *FirewallPolicyRule) *FirewallPoliciesAddRuleCall { c := &FirewallPoliciesAddRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.firewallPolicy = firewallPolicy @@ -56189,7 +58464,7 @@ func (c *FirewallPoliciesAddRuleCall) Header() http.Header { func (c *FirewallPoliciesAddRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -56299,6 +58574,8 @@ type FirewallPoliciesCloneRulesCall struct { } // CloneRules: Copies rules to the specified firewall policy. +// +// - firewallPolicy: Name of the firewall policy to update. func (r *FirewallPoliciesService) CloneRules(firewallPolicy string) *FirewallPoliciesCloneRulesCall { c := &FirewallPoliciesCloneRulesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.firewallPolicy = firewallPolicy @@ -56358,7 +58635,7 @@ func (c *FirewallPoliciesCloneRulesCall) Header() http.Header { func (c *FirewallPoliciesCloneRulesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -56465,6 +58742,8 @@ type FirewallPoliciesDeleteCall struct { } // Delete: Deletes the specified policy. +// +// - firewallPolicy: Name of the firewall policy to delete. func (r *FirewallPoliciesService) Delete(firewallPolicy string) *FirewallPoliciesDeleteCall { c := &FirewallPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.firewallPolicy = firewallPolicy @@ -56517,7 +58796,7 @@ func (c *FirewallPoliciesDeleteCall) Header() http.Header { func (c *FirewallPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -56620,6 +58899,8 @@ type FirewallPoliciesGetCall struct { } // Get: Returns the specified firewall policy. +// +// - firewallPolicy: Name of the firewall policy to get. func (r *FirewallPoliciesService) Get(firewallPolicy string) *FirewallPoliciesGetCall { c := &FirewallPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.firewallPolicy = firewallPolicy @@ -56663,7 +58944,7 @@ func (c *FirewallPoliciesGetCall) Header() http.Header { func (c *FirewallPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -56765,6 +59046,9 @@ type FirewallPoliciesGetAssociationCall struct { } // GetAssociation: Gets an association with the specified name. +// +// - firewallPolicy: Name of the firewall policy to which the queried +// rule belongs. 
func (r *FirewallPoliciesService) GetAssociation(firewallPolicy string) *FirewallPoliciesGetAssociationCall { c := &FirewallPoliciesGetAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.firewallPolicy = firewallPolicy @@ -56815,7 +59099,7 @@ func (c *FirewallPoliciesGetAssociationCall) Header() http.Header { func (c *FirewallPoliciesGetAssociationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -56923,6 +59207,8 @@ type FirewallPoliciesGetIamPolicyCall struct { // GetIamPolicy: Gets the access control policy for a resource. May be // empty if no such policy or resource exists. +// +// - resource: Name or id of the resource for this request. func (r *FirewallPoliciesService) GetIamPolicy(resource string) *FirewallPoliciesGetIamPolicyCall { c := &FirewallPoliciesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -56973,7 +59259,7 @@ func (c *FirewallPoliciesGetIamPolicyCall) Header() http.Header { func (c *FirewallPoliciesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -57081,6 +59367,9 @@ type FirewallPoliciesGetRuleCall struct { } // GetRule: Gets a rule of the specified priority. +// +// - firewallPolicy: Name of the firewall policy to which the queried +// rule belongs. func (r *FirewallPoliciesService) GetRule(firewallPolicy string) *FirewallPoliciesGetRuleCall { c := &FirewallPoliciesGetRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.firewallPolicy = firewallPolicy @@ -57131,7 +59420,7 @@ func (c *FirewallPoliciesGetRuleCall) Header() http.Header { func (c *FirewallPoliciesGetRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -57300,7 +59589,7 @@ func (c *FirewallPoliciesInsertCall) Header() http.Header { func (c *FirewallPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -57483,7 +59772,7 @@ func (c *FirewallPoliciesListCall) ParentId(parentId string) *FirewallPoliciesLi // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *FirewallPoliciesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *FirewallPoliciesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -57526,7 +59815,7 @@ func (c *FirewallPoliciesListCall) Header() http.Header { func (c *FirewallPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -57618,7 +59907,7 @@ func (c *FirewallPoliciesListCall) Do(opts ...googleapi.CallOption) (*FirewallPo // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -57719,7 +60008,7 @@ func (c *FirewallPoliciesListAssociationsCall) Header() http.Header { func (c *FirewallPoliciesListAssociationsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -57813,6 +60102,8 @@ type FirewallPoliciesMoveCall struct { } // Move: Moves the specified firewall policy. +// +// - firewallPolicy: Name of the firewall policy to update. func (r *FirewallPoliciesService) Move(firewallPolicy string) *FirewallPoliciesMoveCall { c := &FirewallPoliciesMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.firewallPolicy = firewallPolicy @@ -57872,7 +60163,7 @@ func (c *FirewallPoliciesMoveCall) Header() http.Header { func (c *FirewallPoliciesMoveCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -57981,6 +60272,8 @@ type FirewallPoliciesPatchCall struct { // Patch: Patches the specified policy with the data included in the // request. +// +// - firewallPolicy: Name of the firewall policy to update. func (r *FirewallPoliciesService) Patch(firewallPolicy string, firewallpolicy *FirewallPolicy) *FirewallPoliciesPatchCall { c := &FirewallPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.firewallPolicy = firewallPolicy @@ -58034,7 +60327,7 @@ func (c *FirewallPoliciesPatchCall) Header() http.Header { func (c *FirewallPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -58145,6 +60438,8 @@ type FirewallPoliciesPatchRuleCall struct { } // PatchRule: Patches a rule of the specified priority. +// +// - firewallPolicy: Name of the firewall policy to update. 
func (r *FirewallPoliciesService) PatchRule(firewallPolicy string, firewallpolicyrule *FirewallPolicyRule) *FirewallPoliciesPatchRuleCall { c := &FirewallPoliciesPatchRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.firewallPolicy = firewallPolicy @@ -58205,7 +60500,7 @@ func (c *FirewallPoliciesPatchRuleCall) Header() http.Header { func (c *FirewallPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -58322,6 +60617,8 @@ type FirewallPoliciesRemoveAssociationCall struct { // RemoveAssociation: Removes an association for the specified firewall // policy. +// +// - firewallPolicy: Name of the firewall policy to update. func (r *FirewallPoliciesService) RemoveAssociation(firewallPolicy string) *FirewallPoliciesRemoveAssociationCall { c := &FirewallPoliciesRemoveAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.firewallPolicy = firewallPolicy @@ -58381,7 +60678,7 @@ func (c *FirewallPoliciesRemoveAssociationCall) Header() http.Header { func (c *FirewallPoliciesRemoveAssociationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -58488,6 +60785,8 @@ type FirewallPoliciesRemoveRuleCall struct { } // RemoveRule: Deletes a rule of the specified priority. +// +// - firewallPolicy: Name of the firewall policy to update. func (r *FirewallPoliciesService) RemoveRule(firewallPolicy string) *FirewallPoliciesRemoveRuleCall { c := &FirewallPoliciesRemoveRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.firewallPolicy = firewallPolicy @@ -58547,7 +60846,7 @@ func (c *FirewallPoliciesRemoveRuleCall) Header() http.Header { func (c *FirewallPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -58657,6 +60956,8 @@ type FirewallPoliciesSetIamPolicyCall struct { // SetIamPolicy: Sets the access control policy on the specified // resource. Replaces any existing policy. +// +// - resource: Name or id of the resource for this request. 
func (r *FirewallPoliciesService) SetIamPolicy(resource string, globalorganizationsetpolicyrequest *GlobalOrganizationSetPolicyRequest) *FirewallPoliciesSetIamPolicyCall { c := &FirewallPoliciesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -58691,7 +60992,7 @@ func (c *FirewallPoliciesSetIamPolicyCall) Header() http.Header { func (c *FirewallPoliciesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -58798,6 +61099,8 @@ type FirewallPoliciesTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. +// +// - resource: Name or id of the resource for this request. func (r *FirewallPoliciesService) TestIamPermissions(resource string, testpermissionsrequest *TestPermissionsRequest) *FirewallPoliciesTestIamPermissionsCall { c := &FirewallPoliciesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -58832,7 +61135,7 @@ func (c *FirewallPoliciesTestIamPermissionsCall) Header() http.Header { func (c *FirewallPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -58939,6 +61242,9 @@ type FirewallsDeleteCall struct { } // Delete: Deletes the specified firewall. +// +// - firewall: Name of the firewall rule to delete. +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/delete func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDeleteCall { c := &FirewallsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -58993,7 +61299,7 @@ func (c *FirewallsDeleteCall) Header() http.Header { func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -59106,6 +61412,9 @@ type FirewallsGetCall struct { } // Get: Returns the specified firewall. +// +// - firewall: Name of the firewall rule to return. +// - project: Project ID for this request. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/get func (r *FirewallsService) Get(project string, firewall string) *FirewallsGetCall { c := &FirewallsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -59151,7 +61460,7 @@ func (c *FirewallsGetCall) Header() http.Header { func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -59263,6 +61572,8 @@ type FirewallsInsertCall struct { // Insert: Creates a firewall rule in the specified project using the // data included in the request. +// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/insert func (r *FirewallsService) Insert(project string, firewall *Firewall) *FirewallsInsertCall { c := &FirewallsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -59317,7 +61628,7 @@ func (c *FirewallsInsertCall) Header() http.Header { func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -59429,6 +61740,8 @@ type FirewallsListCall struct { // List: Retrieves the list of firewall rules available to the specified // project. +// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/list func (r *FirewallsService) List(project string) *FirewallsListCall { c := &FirewallsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -59504,7 +61817,7 @@ func (c *FirewallsListCall) PageToken(pageToken string) *FirewallsListCall { // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *FirewallsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *FirewallsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -59547,7 +61860,7 @@ func (c *FirewallsListCall) Header() http.Header { func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -59647,7 +61960,7 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" // } @@ -59701,6 +62014,9 @@ type FirewallsPatchCall struct { // Patch: Updates the specified firewall rule with the data included in // the request. This method supports PATCH semantics and uses the JSON // merge patch format and processing rules. +// +// - firewall: Name of the firewall rule to patch. +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/patch func (r *FirewallsService) Patch(project string, firewall string, firewall2 *Firewall) *FirewallsPatchCall { c := &FirewallsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -59756,7 +62072,7 @@ func (c *FirewallsPatchCall) Header() http.Header { func (c *FirewallsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -59880,6 +62196,9 @@ type FirewallsUpdateCall struct { // the request. Note that all fields will be updated if using PUT, even // fields that are not specified. To update individual fields, please // use PATCH instead. +// +// - firewall: Name of the firewall rule to update. +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/update func (r *FirewallsService) Update(project string, firewall string, firewall2 *Firewall) *FirewallsUpdateCall { c := &FirewallsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -59935,7 +62254,7 @@ func (c *FirewallsUpdateCall) Header() http.Header { func (c *FirewallsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -60055,6 +62374,8 @@ type ForwardingRulesAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of forwarding rules. +// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/aggregatedList func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRulesAggregatedListCall { c := &ForwardingRulesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -60143,7 +62464,7 @@ func (c *ForwardingRulesAggregatedListCall) PageToken(pageToken string) *Forward // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *ForwardingRulesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -60186,7 +62507,7 @@ func (c *ForwardingRulesAggregatedListCall) Header() http.Header { func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -60291,7 +62612,7 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -60343,6 +62664,10 @@ type ForwardingRulesDeleteCall struct { } // Delete: Deletes the specified ForwardingRule resource. +// +// - forwardingRule: Name of the ForwardingRule resource to delete. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/delete func (r *ForwardingRulesService) Delete(project string, region string, forwardingRule string) *ForwardingRulesDeleteCall { c := &ForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -60398,7 +62723,7 @@ func (c *ForwardingRulesDeleteCall) Header() http.Header { func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -60521,6 +62846,10 @@ type ForwardingRulesGetCall struct { } // Get: Returns the specified ForwardingRule resource. +// +// - forwardingRule: Name of the ForwardingRule resource to return. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/get func (r *ForwardingRulesService) Get(project string, region string, forwardingRule string) *ForwardingRulesGetCall { c := &ForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -60567,7 +62896,7 @@ func (c *ForwardingRulesGetCall) Header() http.Header { func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -60689,6 +63018,9 @@ type ForwardingRulesInsertCall struct { // Insert: Creates a ForwardingRule resource in the specified project // and region using the data included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/insert func (r *ForwardingRulesService) Insert(project string, region string, forwardingrule *ForwardingRule) *ForwardingRulesInsertCall { c := &ForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -60744,7 +63076,7 @@ func (c *ForwardingRulesInsertCall) Header() http.Header { func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -60866,6 +63198,9 @@ type ForwardingRulesListCall struct { // List: Retrieves a list of ForwardingRule resources available to the // specified project and region. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/list func (r *ForwardingRulesService) List(project string, region string) *ForwardingRulesListCall { c := &ForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -60942,7 +63277,7 @@ func (c *ForwardingRulesListCall) PageToken(pageToken string) *ForwardingRulesLi // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *ForwardingRulesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ForwardingRulesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -60985,7 +63320,7 @@ func (c *ForwardingRulesListCall) Header() http.Header { func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -61094,7 +63429,7 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -61150,6 +63485,10 @@ type ForwardingRulesPatchCall struct { // in the request. This method supports PATCH semantics and uses the // JSON merge patch format and processing rules. Currently, you can only // patch the network_tier field. +// +// - forwardingRule: Name of the ForwardingRule resource to patch. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
func (r *ForwardingRulesService) Patch(project string, region string, forwardingRule string, forwardingrule *ForwardingRule) *ForwardingRulesPatchCall { c := &ForwardingRulesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -61205,7 +63544,7 @@ func (c *ForwardingRulesPatchCall) Header() http.Header { func (c *ForwardingRulesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -61337,6 +63676,10 @@ type ForwardingRulesSetLabelsCall struct { // SetLabels: Sets the labels on the specified resource. To learn more // about labels, read the Labeling Resources documentation. +// +// - project: Project ID for this request. +// - region: The region for this request. +// - resource: Name or id of the resource for this request. func (r *ForwardingRulesService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *ForwardingRulesSetLabelsCall { c := &ForwardingRulesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -61392,7 +63735,7 @@ func (c *ForwardingRulesSetLabelsCall) Header() http.Header { func (c *ForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -61524,6 +63867,11 @@ type ForwardingRulesSetTargetCall struct { // SetTarget: Changes target URL for forwarding rule. The new target // should be of the same type as the old target. +// +// - forwardingRule: Name of the ForwardingRule resource in which target +// is to be set. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/setTarget func (r *ForwardingRulesService) SetTarget(project string, region string, forwardingRule string, targetreference *TargetReference) *ForwardingRulesSetTargetCall { c := &ForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -61580,7 +63928,7 @@ func (c *ForwardingRulesSetTargetCall) Header() http.Header { func (c *ForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -61709,6 +64057,9 @@ type GlobalAddressesDeleteCall struct { } // Delete: Deletes the specified address resource. +// +// - address: Name of the address resource to delete. +// - project: Project ID for this request. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/delete func (r *GlobalAddressesService) Delete(project string, address string) *GlobalAddressesDeleteCall { c := &GlobalAddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -61763,7 +64114,7 @@ func (c *GlobalAddressesDeleteCall) Header() http.Header { func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -61877,6 +64228,9 @@ type GlobalAddressesGetCall struct { // Get: Returns the specified address resource. Gets a list of available // addresses by making a list() request. +// +// - address: Name of the address resource to return. +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/get func (r *GlobalAddressesService) Get(project string, address string) *GlobalAddressesGetCall { c := &GlobalAddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -61922,7 +64276,7 @@ func (c *GlobalAddressesGetCall) Header() http.Header { func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -62034,6 +64388,8 @@ type GlobalAddressesInsertCall struct { // Insert: Creates an address resource in the specified project by using // the data included in the request. +// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/insert func (r *GlobalAddressesService) Insert(project string, address *Address) *GlobalAddressesInsertCall { c := &GlobalAddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -62088,7 +64444,7 @@ func (c *GlobalAddressesInsertCall) Header() http.Header { func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -62199,6 +64555,8 @@ type GlobalAddressesListCall struct { } // List: Retrieves a list of global addresses. +// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/list func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall { c := &GlobalAddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -62274,7 +64632,7 @@ func (c *GlobalAddressesListCall) PageToken(pageToken string) *GlobalAddressesLi // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *GlobalAddressesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalAddressesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -62317,7 +64675,7 @@ func (c *GlobalAddressesListCall) Header() http.Header { func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -62417,7 +64775,7 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -62468,6 +64826,9 @@ type GlobalForwardingRulesDeleteCall struct { } // Delete: Deletes the specified GlobalForwardingRule resource. +// +// - forwardingRule: Name of the ForwardingRule resource to delete. +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/delete func (r *GlobalForwardingRulesService) Delete(project string, forwardingRule string) *GlobalForwardingRulesDeleteCall { c := &GlobalForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -62522,7 +64883,7 @@ func (c *GlobalForwardingRulesDeleteCall) Header() http.Header { func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -62636,6 +64997,9 @@ type GlobalForwardingRulesGetCall struct { // Get: Returns the specified GlobalForwardingRule resource. Gets a list // of available forwarding rules by making a list() request. +// +// - forwardingRule: Name of the ForwardingRule resource to return. +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/get func (r *GlobalForwardingRulesService) Get(project string, forwardingRule string) *GlobalForwardingRulesGetCall { c := &GlobalForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -62681,7 +65045,7 @@ func (c *GlobalForwardingRulesGetCall) Header() http.Header { func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -62793,6 +65157,8 @@ type GlobalForwardingRulesInsertCall struct { // Insert: Creates a GlobalForwardingRule resource in the specified // project using the data included in the request. +// +// - project: Project ID for this request. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/insert func (r *GlobalForwardingRulesService) Insert(project string, forwardingrule *ForwardingRule) *GlobalForwardingRulesInsertCall { c := &GlobalForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -62847,7 +65213,7 @@ func (c *GlobalForwardingRulesInsertCall) Header() http.Header { func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -62959,6 +65325,8 @@ type GlobalForwardingRulesListCall struct { // List: Retrieves a list of GlobalForwardingRule resources available to // the specified project. +// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/list func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRulesListCall { c := &GlobalForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -63034,7 +65402,7 @@ func (c *GlobalForwardingRulesListCall) PageToken(pageToken string) *GlobalForwa // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *GlobalForwardingRulesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalForwardingRulesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -63077,7 +65445,7 @@ func (c *GlobalForwardingRulesListCall) Header() http.Header { func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -63177,7 +65545,7 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -63232,6 +65600,9 @@ type GlobalForwardingRulesPatchCall struct { // in the request. This method supports PATCH semantics and uses the // JSON merge patch format and processing rules. Currently, you can only // patch the network_tier field. +// +// - forwardingRule: Name of the ForwardingRule resource to patch. +// - project: Project ID for this request. 
func (r *GlobalForwardingRulesService) Patch(project string, forwardingRule string, forwardingrule *ForwardingRule) *GlobalForwardingRulesPatchCall { c := &GlobalForwardingRulesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -63286,7 +65657,7 @@ func (c *GlobalForwardingRulesPatchCall) Header() http.Header { func (c *GlobalForwardingRulesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -63408,6 +65779,9 @@ type GlobalForwardingRulesSetLabelsCall struct { // SetLabels: Sets the labels on the specified resource. To learn more // about labels, read the Labeling Resources documentation. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. func (r *GlobalForwardingRulesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *GlobalForwardingRulesSetLabelsCall { c := &GlobalForwardingRulesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -63443,7 +65817,7 @@ func (c *GlobalForwardingRulesSetLabelsCall) Header() http.Header { func (c *GlobalForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -63560,6 +65934,10 @@ type GlobalForwardingRulesSetTargetCall struct { // SetTarget: Changes target URL for the GlobalForwardingRule resource. // The new target should be of the same type as the old target. +// +// - forwardingRule: Name of the ForwardingRule resource in which target +// is to be set. +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/setTarget func (r *GlobalForwardingRulesService) SetTarget(project string, forwardingRule string, targetreference *TargetReference) *GlobalForwardingRulesSetTargetCall { c := &GlobalForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -63615,7 +65993,7 @@ func (c *GlobalForwardingRulesSetTargetCall) Header() http.Header { func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -63737,6 +66115,11 @@ type GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall struct { // AttachNetworkEndpoints: Attach a network endpoint to the specified // network endpoint group. +// +// - networkEndpointGroup: The name of the network endpoint group where +// you are attaching network endpoints to. It should comply with +// RFC1035. +// - project: Project ID for this request. 
func (r *GlobalNetworkEndpointGroupsService) AttachNetworkEndpoints(project string, networkEndpointGroup string, globalnetworkendpointgroupsattachendpointsrequest *GlobalNetworkEndpointGroupsAttachEndpointsRequest) *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall { c := &GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -63791,7 +66174,7 @@ func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.He func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -63911,6 +66294,10 @@ type GlobalNetworkEndpointGroupsDeleteCall struct { // Delete: Deletes the specified network endpoint group.Note that the // NEG cannot be deleted if there are backend services referencing it. +// +// - networkEndpointGroup: The name of the network endpoint group to +// delete. It should comply with RFC1035. +// - project: Project ID for this request. func (r *GlobalNetworkEndpointGroupsService) Delete(project string, networkEndpointGroup string) *GlobalNetworkEndpointGroupsDeleteCall { c := &GlobalNetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -63964,7 +66351,7 @@ func (c *GlobalNetworkEndpointGroupsDeleteCall) Header() http.Header { func (c *GlobalNetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -64077,6 +66464,10 @@ type GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall struct { // DetachNetworkEndpoints: Detach the network endpoint from the // specified network endpoint group. +// +// - networkEndpointGroup: The name of the network endpoint group where +// you are removing network endpoints. It should comply with RFC1035. +// - project: Project ID for this request. func (r *GlobalNetworkEndpointGroupsService) DetachNetworkEndpoints(project string, networkEndpointGroup string, globalnetworkendpointgroupsdetachendpointsrequest *GlobalNetworkEndpointGroupsDetachEndpointsRequest) *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall { c := &GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -64131,7 +66522,7 @@ func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.He func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -64252,6 +66643,10 @@ type GlobalNetworkEndpointGroupsGetCall struct { // Get: Returns the specified network endpoint group. Gets a list of // available network endpoint groups by making a list() request. +// +// - networkEndpointGroup: The name of the network endpoint group. It +// should comply with RFC1035. 
+// - project: Project ID for this request. func (r *GlobalNetworkEndpointGroupsService) Get(project string, networkEndpointGroup string) *GlobalNetworkEndpointGroupsGetCall { c := &GlobalNetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -64296,7 +66691,7 @@ func (c *GlobalNetworkEndpointGroupsGetCall) Header() http.Header { func (c *GlobalNetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -64407,6 +66802,8 @@ type GlobalNetworkEndpointGroupsInsertCall struct { // Insert: Creates a network endpoint group in the specified project // using the parameters that are included in the request. +// +// - project: Project ID for this request. func (r *GlobalNetworkEndpointGroupsService) Insert(project string, networkendpointgroup *NetworkEndpointGroup) *GlobalNetworkEndpointGroupsInsertCall { c := &GlobalNetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -64460,7 +66857,7 @@ func (c *GlobalNetworkEndpointGroupsInsertCall) Header() http.Header { func (c *GlobalNetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -64572,6 +66969,8 @@ type GlobalNetworkEndpointGroupsListCall struct { // List: Retrieves the list of network endpoint groups that are located // in the specified project. +// +// - project: Project ID for this request. func (r *GlobalNetworkEndpointGroupsService) List(project string) *GlobalNetworkEndpointGroupsListCall { c := &GlobalNetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -64646,7 +67045,7 @@ func (c *GlobalNetworkEndpointGroupsListCall) PageToken(pageToken string) *Globa // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *GlobalNetworkEndpointGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalNetworkEndpointGroupsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -64689,7 +67088,7 @@ func (c *GlobalNetworkEndpointGroupsListCall) Header() http.Header { func (c *GlobalNetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -64789,7 +67188,7 @@ func (c *GlobalNetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -64841,6 +67240,11 @@ type GlobalNetworkEndpointGroupsListNetworkEndpointsCall struct { // ListNetworkEndpoints: Lists the network endpoints in the specified // network endpoint group. +// +// - networkEndpointGroup: The name of the network endpoint group from +// which you want to generate a list of included network endpoints. It +// should comply with RFC1035. +// - project: Project ID for this request. func (r *GlobalNetworkEndpointGroupsService) ListNetworkEndpoints(project string, networkEndpointGroup string) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { c := &GlobalNetworkEndpointGroupsListNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -64916,7 +67320,7 @@ func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToke // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -64949,7 +67353,7 @@ func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Head func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -65056,7 +67460,7 @@ func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googlea // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -65107,6 +67511,8 @@ type GlobalOperationsAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of all operations. +// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/aggregatedList func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperationsAggregatedListCall { c := &GlobalOperationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -65195,7 +67601,7 @@ func (c *GlobalOperationsAggregatedListCall) PageToken(pageToken string) *Global // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *GlobalOperationsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -65238,7 +67644,7 @@ func (c *GlobalOperationsAggregatedListCall) Header() http.Header { func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -65343,7 +67749,7 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -65394,6 +67800,9 @@ type GlobalOperationsDeleteCall struct { } // Delete: Deletes the specified Operations resource. +// +// - operation: Name of the Operations resource to delete. +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/delete func (r *GlobalOperationsService) Delete(project string, operation string) *GlobalOperationsDeleteCall { c := &GlobalOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -65429,7 +67838,7 @@ func (c *GlobalOperationsDeleteCall) Header() http.Header { func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -65510,6 +67919,9 @@ type GlobalOperationsGetCall struct { // Get: Retrieves the specified Operations resource. Gets a list of // operations by making a `list()` request. +// +// - operation: Name of the Operations resource to return. +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/get func (r *GlobalOperationsService) Get(project string, operation string) *GlobalOperationsGetCall { c := &GlobalOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -65555,7 +67967,7 @@ func (c *GlobalOperationsGetCall) Header() http.Header { func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -65667,6 +68079,8 @@ type GlobalOperationsListCall struct { // List: Retrieves a list of Operation resources contained within the // specified project. +// +// - project: Project ID for this request. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/list func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall { c := &GlobalOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -65742,7 +68156,7 @@ func (c *GlobalOperationsListCall) PageToken(pageToken string) *GlobalOperations // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *GlobalOperationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalOperationsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -65785,7 +68199,7 @@ func (c *GlobalOperationsListCall) Header() http.Header { func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -65885,7 +68299,7 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -65949,6 +68363,9 @@ type GlobalOperationsWaitCall struct { // - If the default deadline is reached, there is no guarantee that the // operation is actually done when the method returns. Be prepared to // retry if the operation is not `DONE`. +// +// - operation: Name of the Operations resource to return. +// - project: Project ID for this request. func (r *GlobalOperationsService) Wait(project string, operation string) *GlobalOperationsWaitCall { c := &GlobalOperationsWaitCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -65983,7 +68400,7 @@ func (c *GlobalOperationsWaitCall) Header() http.Header { func (c *GlobalOperationsWaitCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -66090,6 +68507,8 @@ type GlobalOrganizationOperationsDeleteCall struct { } // Delete: Deletes the specified Operations resource. +// +// - operation: Name of the Operations resource to delete. 
func (r *GlobalOrganizationOperationsService) Delete(operation string) *GlobalOrganizationOperationsDeleteCall { c := &GlobalOrganizationOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.operation = operation @@ -66130,7 +68549,7 @@ func (c *GlobalOrganizationOperationsDeleteCall) Header() http.Header { func (c *GlobalOrganizationOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -66206,6 +68625,8 @@ type GlobalOrganizationOperationsGetCall struct { // Get: Retrieves the specified Operations resource. Gets a list of // operations by making a `list()` request. +// +// - operation: Name of the Operations resource to return. func (r *GlobalOrganizationOperationsService) Get(operation string) *GlobalOrganizationOperationsGetCall { c := &GlobalOrganizationOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.operation = operation @@ -66256,7 +68677,7 @@ func (c *GlobalOrganizationOperationsGetCall) Header() http.Header { func (c *GlobalOrganizationOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -66443,7 +68864,7 @@ func (c *GlobalOrganizationOperationsListCall) ParentId(parentId string) *Global // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *GlobalOrganizationOperationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalOrganizationOperationsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -66486,7 +68907,7 @@ func (c *GlobalOrganizationOperationsListCall) Header() http.Header { func (c *GlobalOrganizationOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -66578,7 +68999,7 @@ func (c *GlobalOrganizationOperationsListCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" // } @@ -66617,116 +69038,203 @@ func (c *GlobalOrganizationOperationsListCall) Pages(ctx context.Context, f func } } -// method id "compute.healthChecks.aggregatedList": +// method id "compute.globalPublicDelegatedPrefixes.delete": -type HealthChecksAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type GlobalPublicDelegatedPrefixesDeleteCall struct { + s *Service + project string + publicDelegatedPrefix string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves the list of all HealthCheck resources, -// regional and global, available to the specified project. -func (r *HealthChecksService) AggregatedList(project string) *HealthChecksAggregatedListCall { - c := &HealthChecksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified global PublicDelegatedPrefix. +// +// - project: Project ID for this request. +// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource +// to delete. +func (r *GlobalPublicDelegatedPrefixesService) Delete(project string, publicDelegatedPrefix string) *GlobalPublicDelegatedPrefixesDeleteCall { + c := &GlobalPublicDelegatedPrefixesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.publicDelegatedPrefix = publicDelegatedPrefix return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either `=`, `!=`, `>`, or -// `<`. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also filter nested fields. For example, you could specify -// `scheduling.automaticRestart = false` to include instances only if -// they are not scheduled for automatic restarts. You can use filtering -// on nested fields to filter based on resource labels. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example: ``` (scheduling.automaticRestart = -// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression -// is an `AND` expression. However, you can include `AND` and `OR` -// expressions explicitly. 
For example: ``` (cpuPlatform = "Intel -// Skylake") OR (cpuPlatform = "Intel Broadwell") AND -// (scheduling.automaticRestart = true) ``` -func (c *HealthChecksAggregatedListCall) Filter(filter string) *HealthChecksAggregatedListCall { - c.urlParams_.Set("filter", filter) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *GlobalPublicDelegatedPrefixesDeleteCall) RequestId(requestId string) *GlobalPublicDelegatedPrefixesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } -// IncludeAllScopes sets the optional parameter "includeAllScopes": -// Indicates whether every visible scope for each scope type (zone, -// region, global) should be included in the response. For new resource -// types added after this field, the flag has no effect as new resource -// types will always include every visible scope for each scope type in -// response. For resource types which predate this field, if this flag -// is omitted or false, only scopes of the scope types where the -// resource type is expected to be found will be included. -func (c *HealthChecksAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *HealthChecksAggregatedListCall { - c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GlobalPublicDelegatedPrefixesDeleteCall) Fields(s ...googleapi.Field) *GlobalPublicDelegatedPrefixesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *HealthChecksAggregatedListCall) MaxResults(maxResults int64) *HealthChecksAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GlobalPublicDelegatedPrefixesDeleteCall) Context(ctx context.Context) *GlobalPublicDelegatedPrefixesDeleteCall { + c.ctx_ = ctx return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using `orderBy="creationTimestamp desc". This sorts -// results based on the `creationTimestamp` field in reverse -// chronological order (newest result first). Use this to sort resources -// like operations so that the newest operation is returned -// first. -// -// Currently, only sorting by `name` or `creationTimestamp desc` is -// supported. -func (c *HealthChecksAggregatedListCall) OrderBy(orderBy string) *HealthChecksAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *GlobalPublicDelegatedPrefixesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *HealthChecksAggregatedListCall) PageToken(pageToken string) *HealthChecksAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c +func (c *GlobalPublicDelegatedPrefixesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "publicDelegatedPrefix": c.publicDelegatedPrefix, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false and the logic is the same as today. -func (c *HealthChecksAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *HealthChecksAggregatedListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// Do executes the "compute.globalPublicDelegatedPrefixes.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalPublicDelegatedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified global PublicDelegatedPrefix.", + // "httpMethod": "DELETE", + // "id": "compute.globalPublicDelegatedPrefixes.delete", + // "parameterOrder": [ + // "project", + // "publicDelegatedPrefix" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "publicDelegatedPrefix": { + // "description": "Name of the PublicDelegatedPrefix resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.globalPublicDelegatedPrefixes.get": + +type GlobalPublicDelegatedPrefixesGetCall struct { + s *Service + project string + publicDelegatedPrefix string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified global PublicDelegatedPrefix resource. +// +// - project: Project ID for this request. +// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource +// to return. +func (r *GlobalPublicDelegatedPrefixesService) Get(project string, publicDelegatedPrefix string) *GlobalPublicDelegatedPrefixesGetCall { + c := &GlobalPublicDelegatedPrefixesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.publicDelegatedPrefix = publicDelegatedPrefix return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *HealthChecksAggregatedListCall) Fields(s ...googleapi.Field) *HealthChecksAggregatedListCall { +func (c *GlobalPublicDelegatedPrefixesGetCall) Fields(s ...googleapi.Field) *GlobalPublicDelegatedPrefixesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -66736,7 +69244,7 @@ func (c *HealthChecksAggregatedListCall) Fields(s ...googleapi.Field) *HealthChe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HealthChecksAggregatedListCall) IfNoneMatch(entityTag string) *HealthChecksAggregatedListCall { +func (c *GlobalPublicDelegatedPrefixesGetCall) IfNoneMatch(entityTag string) *GlobalPublicDelegatedPrefixesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -66744,23 +69252,23 @@ func (c *HealthChecksAggregatedListCall) IfNoneMatch(entityTag string) *HealthCh // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksAggregatedListCall) Context(ctx context.Context) *HealthChecksAggregatedListCall { +func (c *GlobalPublicDelegatedPrefixesGetCall) Context(ctx context.Context) *GlobalPublicDelegatedPrefixesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksAggregatedListCall) Header() http.Header { +func (c *GlobalPublicDelegatedPrefixesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalPublicDelegatedPrefixesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -66771,7 +69279,7 @@ func (c *HealthChecksAggregatedListCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/healthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -66779,19 +69287,20 @@ func (c *HealthChecksAggregatedListCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "publicDelegatedPrefix": c.publicDelegatedPrefix, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.aggregatedList" call. -// Exactly one of *HealthChecksAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *HealthChecksAggregatedList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.globalPublicDelegatedPrefixes.get" call. +// Exactly one of *PublicDelegatedPrefix or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *PublicDelegatedPrefix.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HealthChecksAggregatedListCall) Do(opts ...googleapi.CallOption) (*HealthChecksAggregatedList, error) { +func (c *GlobalPublicDelegatedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefix, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -66810,7 +69319,7 @@ func (c *HealthChecksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Heal if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HealthChecksAggregatedList{ + ret := &PublicDelegatedPrefix{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -66822,57 +69331,32 @@ func (c *HealthChecksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Heal } return ret, nil // { - // "description": "Retrieves the list of all HealthCheck resources, regional and global, available to the specified project.", + // "description": "Returns the specified global PublicDelegatedPrefix resource.", // "httpMethod": "GET", - // "id": "compute.healthChecks.aggregatedList", + // "id": "compute.globalPublicDelegatedPrefixes.get", // "parameterOrder": [ - // "project" + // "project", + // "publicDelegatedPrefix" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "includeAllScopes": { - // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - // "location": "query", - // "type": "boolean" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { - // "description": "Name of the project scoping this request.", + // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", - // "location": "query", - // "type": "boolean" + // "publicDelegatedPrefix": { + // "description": "Name of the PublicDelegatedPrefix resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/aggregated/healthChecks", + // "path": "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}", // "response": { - // "$ref": "HealthChecksAggregatedList" + // "$ref": "PublicDelegatedPrefix" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -66883,43 +69367,25 @@ func (c *HealthChecksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Heal } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *HealthChecksAggregatedListCall) Pages(ctx context.Context, f func(*HealthChecksAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.healthChecks.delete": +// method id "compute.globalPublicDelegatedPrefixes.insert": -type HealthChecksDeleteCall struct { - s *Service - project string - healthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalPublicDelegatedPrefixesInsertCall struct { + s *Service + project string + publicdelegatedprefix *PublicDelegatedPrefix + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified HealthCheck resource. -func (r *HealthChecksService) Delete(project string, healthCheck string) *HealthChecksDeleteCall { - c := &HealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a global PublicDelegatedPrefix in the specified +// project using the parameters that are included in the request. +// +// - project: Project ID for this request. +func (r *GlobalPublicDelegatedPrefixesService) Insert(project string, publicdelegatedprefix *PublicDelegatedPrefix) *GlobalPublicDelegatedPrefixesInsertCall { + c := &GlobalPublicDelegatedPrefixesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthCheck = healthCheck + c.publicdelegatedprefix = publicdelegatedprefix return c } @@ -66937,7 +69403,7 @@ func (r *HealthChecksService) Delete(project string, healthCheck string) *Health // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HealthChecksDeleteCall) RequestId(requestId string) *HealthChecksDeleteCall { +func (c *GlobalPublicDelegatedPrefixesInsertCall) RequestId(requestId string) *GlobalPublicDelegatedPrefixesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -66945,7 +69411,7 @@ func (c *HealthChecksDeleteCall) RequestId(requestId string) *HealthChecksDelete // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksDeleteCall) Fields(s ...googleapi.Field) *HealthChecksDeleteCall { +func (c *GlobalPublicDelegatedPrefixesInsertCall) Fields(s ...googleapi.Field) *GlobalPublicDelegatedPrefixesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -66953,52 +69419,56 @@ func (c *HealthChecksDeleteCall) Fields(s ...googleapi.Field) *HealthChecksDelet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksDeleteCall) Context(ctx context.Context) *HealthChecksDeleteCall { +func (c *GlobalPublicDelegatedPrefixesInsertCall) Context(ctx context.Context) *GlobalPublicDelegatedPrefixesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
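Illustrative only, not part of the vendored file: a minimal sketch of how the new GlobalPublicDelegatedPrefixes Insert call above might be driven. It assumes the compute/v1 import path used by this vendor tree, that *compute.Service exposes a GlobalPublicDelegatedPrefixes field (the generator's usual layout), and that PublicDelegatedPrefix carries Name/IpCidrRange; project, name, CIDR and UUID values are placeholders.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// NewService picks up Application Default Credentials.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	pdp := &compute.PublicDelegatedPrefix{
		Name:        "example-pdp",    // placeholder
		IpCidrRange: "203.0.113.0/24", // placeholder
	}
	op, err := svc.GlobalPublicDelegatedPrefixes.Insert("my-project", pdp).
		RequestId("11111111-2222-3333-4444-555555555555"). // any unique, valid UUID; reusable on retry
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("operation %s is %s", op.Name, op.Status)
}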
-func (c *HealthChecksDeleteCall) Header() http.Header { +func (c *GlobalPublicDelegatedPrefixesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalPublicDelegatedPrefixesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicdelegatedprefix) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicDelegatedPrefixes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.delete" call. +// Do executes the "compute.globalPublicDelegatedPrefixes.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalPublicDelegatedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -67029,21 +69499,13 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Deletes the specified HealthCheck resource.", - // "httpMethod": "DELETE", - // "id": "compute.healthChecks.delete", + // "description": "Creates a global PublicDelegatedPrefix in the specified project using the parameters that are included in the request.", + // "httpMethod": "POST", + // "id": "compute.globalPublicDelegatedPrefixes.insert", // "parameterOrder": [ - // "project", - // "healthCheck" + // "project" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -67057,7 +69519,10 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e // "type": "string" // } // }, - // "path": "projects/{project}/global/healthChecks/{healthCheck}", + // "path": "projects/{project}/global/publicDelegatedPrefixes", + // "request": { + // "$ref": "PublicDelegatedPrefix" + // }, // "response": { // "$ref": "Operation" // }, @@ -67069,31 +69534,104 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.healthChecks.get": +// method id "compute.globalPublicDelegatedPrefixes.list": -type HealthChecksGetCall struct { +type GlobalPublicDelegatedPrefixesListCall struct { s *Service project string - healthCheck string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified HealthCheck resource. Gets a list of -// available health checks by making a list() request. -func (r *HealthChecksService) Get(project string, healthCheck string) *HealthChecksGetCall { - c := &HealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Lists the global PublicDelegatedPrefixes for a project. +// +// - project: Project ID for this request. +func (r *GlobalPublicDelegatedPrefixesService) List(project string) *GlobalPublicDelegatedPrefixesListCall { + c := &GlobalPublicDelegatedPrefixesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthCheck = healthCheck + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. 
For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *GlobalPublicDelegatedPrefixesListCall) Filter(filter string) *GlobalPublicDelegatedPrefixesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *GlobalPublicDelegatedPrefixesListCall) MaxResults(maxResults int64) *GlobalPublicDelegatedPrefixesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *GlobalPublicDelegatedPrefixesListCall) OrderBy(orderBy string) *GlobalPublicDelegatedPrefixesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *GlobalPublicDelegatedPrefixesListCall) PageToken(pageToken string) *GlobalPublicDelegatedPrefixesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *GlobalPublicDelegatedPrefixesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalPublicDelegatedPrefixesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksGetCall) Fields(s ...googleapi.Field) *HealthChecksGetCall { +func (c *GlobalPublicDelegatedPrefixesListCall) Fields(s ...googleapi.Field) *GlobalPublicDelegatedPrefixesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -67103,7 +69641,7 @@ func (c *HealthChecksGetCall) Fields(s ...googleapi.Field) *HealthChecksGetCall // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
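Illustrative only: a single-page list call exercising the filter/maxResults/returnPartialSuccess parameters documented above. It assumes the same imports and *compute.Service as the earlier sketch, that PublicDelegatedPrefixList has the generator's usual Items field, and the filter expression is hypothetical.

func listOnePage(ctx context.Context, svc *compute.Service, project string) error {
	resp, err := svc.GlobalPublicDelegatedPrefixes.List(project).
		Filter(`status != "ANNOUNCED"`). // hypothetical filter expression
		MaxResults(50).
		ReturnPartialSuccess(true).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, pdp := range resp.Items { // Items assumed from the usual generated list shape
		fmt.Println(pdp.Name)
	}
	return nil
}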
-func (c *HealthChecksGetCall) IfNoneMatch(entityTag string) *HealthChecksGetCall { +func (c *GlobalPublicDelegatedPrefixesListCall) IfNoneMatch(entityTag string) *GlobalPublicDelegatedPrefixesListCall { c.ifNoneMatch_ = entityTag return c } @@ -67111,23 +69649,23 @@ func (c *HealthChecksGetCall) IfNoneMatch(entityTag string) *HealthChecksGetCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksGetCall) Context(ctx context.Context) *HealthChecksGetCall { +func (c *GlobalPublicDelegatedPrefixesListCall) Context(ctx context.Context) *GlobalPublicDelegatedPrefixesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksGetCall) Header() http.Header { +func (c *GlobalPublicDelegatedPrefixesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalPublicDelegatedPrefixesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -67138,7 +69676,7 @@ func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicDelegatedPrefixes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -67146,20 +69684,19 @@ func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.get" call. -// Exactly one of *HealthCheck or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *HealthCheck.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, error) { +// Do executes the "compute.globalPublicDelegatedPrefixes.list" call. +// Exactly one of *PublicDelegatedPrefixList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *PublicDelegatedPrefixList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *GlobalPublicDelegatedPrefixesListCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefixList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -67178,7 +69715,7 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HealthCheck{ + ret := &PublicDelegatedPrefixList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -67190,19 +69727,34 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er } return ret, nil // { - // "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", + // "description": "Lists the global PublicDelegatedPrefixes for a project.", // "httpMethod": "GET", - // "id": "compute.healthChecks.get", + // "id": "compute.globalPublicDelegatedPrefixes.list", // "parameterOrder": [ - // "project", - // "healthCheck" + // "project" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -67211,11 +69763,16 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "projects/{project}/global/healthChecks/{healthCheck}", + // "path": "projects/{project}/global/publicDelegatedPrefixes", // "response": { - // "$ref": "HealthCheck" + // "$ref": "PublicDelegatedPrefixList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -67226,23 +69783,51 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er } -// method id "compute.healthChecks.insert": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *GlobalPublicDelegatedPrefixesListCall) Pages(ctx context.Context, f func(*PublicDelegatedPrefixList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type HealthChecksInsertCall struct { - s *Service - project string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.globalPublicDelegatedPrefixes.patch": + +type GlobalPublicDelegatedPrefixesPatchCall struct { + s *Service + project string + publicDelegatedPrefix string + publicdelegatedprefix *PublicDelegatedPrefix + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a HealthCheck resource in the specified project using -// the data included in the request. -func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) *HealthChecksInsertCall { - c := &HealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Patches the specified global PublicDelegatedPrefix resource +// with the data included in the request. This method supports PATCH +// semantics and uses JSON merge patch format and processing rules. +// +// - project: Project ID for this request. +// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource +// to patch. 
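Illustrative only: the Pages helper added just above drives the pageToken/nextPageToken loop itself, so callers never touch the token. A sketch under the same assumptions as the previous snippets (imports, Items field):

func printAllPDPs(ctx context.Context, svc *compute.Service, project string) error {
	return svc.GlobalPublicDelegatedPrefixes.List(project).Pages(ctx,
		func(page *compute.PublicDelegatedPrefixList) error {
			for _, pdp := range page.Items {
				fmt.Println(pdp.Name)
			}
			return nil // returning a non-nil error here halts the iteration early
		})
}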
+func (r *GlobalPublicDelegatedPrefixesService) Patch(project string, publicDelegatedPrefix string, publicdelegatedprefix *PublicDelegatedPrefix) *GlobalPublicDelegatedPrefixesPatchCall { + c := &GlobalPublicDelegatedPrefixesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthcheck = healthcheck + c.publicDelegatedPrefix = publicDelegatedPrefix + c.publicdelegatedprefix = publicdelegatedprefix return c } @@ -67260,7 +69845,7 @@ func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) * // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HealthChecksInsertCall) RequestId(requestId string) *HealthChecksInsertCall { +func (c *GlobalPublicDelegatedPrefixesPatchCall) RequestId(requestId string) *GlobalPublicDelegatedPrefixesPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -67268,7 +69853,7 @@ func (c *HealthChecksInsertCall) RequestId(requestId string) *HealthChecksInsert // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksInsertCall) Fields(s ...googleapi.Field) *HealthChecksInsertCall { +func (c *GlobalPublicDelegatedPrefixesPatchCall) Fields(s ...googleapi.Field) *GlobalPublicDelegatedPrefixesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -67276,56 +69861,57 @@ func (c *HealthChecksInsertCall) Fields(s ...googleapi.Field) *HealthChecksInser // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksInsertCall) Context(ctx context.Context) *HealthChecksInsertCall { +func (c *GlobalPublicDelegatedPrefixesPatchCall) Context(ctx context.Context) *GlobalPublicDelegatedPrefixesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksInsertCall) Header() http.Header { +func (c *GlobalPublicDelegatedPrefixesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalPublicDelegatedPrefixesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicdelegatedprefix) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/healthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "publicDelegatedPrefix": c.publicDelegatedPrefix, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.insert" call. +// Do executes the "compute.globalPublicDelegatedPrefixes.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalPublicDelegatedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -67356,11 +69942,12 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.healthChecks.insert", + // "description": "Patches the specified global PublicDelegatedPrefix resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.globalPublicDelegatedPrefixes.patch", // "parameterOrder": [ - // "project" + // "project", + // "publicDelegatedPrefix" // ], // "parameters": { // "project": { @@ -67370,15 +69957,22 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e // "required": true, // "type": "string" // }, + // "publicDelegatedPrefix": { + // "description": "Name of the PublicDelegatedPrefix resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/global/healthChecks", + // "path": "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}", // "request": { - // "$ref": "HealthCheck" + // "$ref": "PublicDelegatedPrefix" // }, // "response": { // "$ref": "Operation" @@ -67391,9 +69985,9 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.healthChecks.list": +// method id "compute.healthChecks.aggregatedList": -type HealthChecksListCall struct { +type HealthChecksAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -67402,10 +69996,12 @@ type HealthChecksListCall struct { header_ http.Header } -// List: Retrieves the list of HealthCheck resources available to the -// specified project. -func (r *HealthChecksService) List(project string) *HealthChecksListCall { - c := &HealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of all HealthCheck resources, +// regional and global, available to the specified project. +// +// - project: Name of the project scoping this request. +func (r *HealthChecksService) AggregatedList(project string) *HealthChecksAggregatedListCall { + c := &HealthChecksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -67433,18 +70029,31 @@ func (r *HealthChecksService) List(project string) *HealthChecksListCall { // expressions explicitly. For example: ``` (cpuPlatform = "Intel // Skylake") OR (cpuPlatform = "Intel Broadwell") AND // (scheduling.automaticRestart = true) ``` -func (c *HealthChecksListCall) Filter(filter string) *HealthChecksListCall { +func (c *HealthChecksAggregatedListCall) Filter(filter string) *HealthChecksAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *HealthChecksAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *HealthChecksAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of // available results is larger than `maxResults`, Compute Engine returns // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. 
(Default: `500`) -func (c *HealthChecksListCall) MaxResults(maxResults int64) *HealthChecksListCall { +func (c *HealthChecksAggregatedListCall) MaxResults(maxResults int64) *HealthChecksAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -67462,7 +70071,7 @@ func (c *HealthChecksListCall) MaxResults(maxResults int64) *HealthChecksListCal // // Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *HealthChecksListCall) OrderBy(orderBy string) *HealthChecksListCall { +func (c *HealthChecksAggregatedListCall) OrderBy(orderBy string) *HealthChecksAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -67470,7 +70079,7 @@ func (c *HealthChecksListCall) OrderBy(orderBy string) *HealthChecksListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall { +func (c *HealthChecksAggregatedListCall) PageToken(pageToken string) *HealthChecksAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -67478,8 +70087,8 @@ func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. -func (c *HealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *HealthChecksListCall { +// false. +func (c *HealthChecksAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *HealthChecksAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -67487,7 +70096,7 @@ func (c *HealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess bool) * // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksListCall) Fields(s ...googleapi.Field) *HealthChecksListCall { +func (c *HealthChecksAggregatedListCall) Fields(s ...googleapi.Field) *HealthChecksAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -67497,7 +70106,7 @@ func (c *HealthChecksListCall) Fields(s ...googleapi.Field) *HealthChecksListCal // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HealthChecksListCall) IfNoneMatch(entityTag string) *HealthChecksListCall { +func (c *HealthChecksAggregatedListCall) IfNoneMatch(entityTag string) *HealthChecksAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -67505,23 +70114,23 @@ func (c *HealthChecksListCall) IfNoneMatch(entityTag string) *HealthChecksListCa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksListCall) Context(ctx context.Context) *HealthChecksListCall { +func (c *HealthChecksAggregatedListCall) Context(ctx context.Context) *HealthChecksAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
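Illustrative only: a rough sketch of the renamed aggregated-list call with its new IncludeAllScopes parameter. The map-of-scopes shape of HealthChecksAggregatedList.Items and the HealthChecks field on the scoped list are assumed from the generator's usual aggregated-list layout.

func printAggregatedHealthChecks(ctx context.Context, svc *compute.Service, project string) error {
	ret, err := svc.HealthChecks.AggregatedList(project).
		IncludeAllScopes(true). // include every zone/region/global scope, even empty ones
		ReturnPartialSuccess(true).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for scope, scoped := range ret.Items { // keyed by scope, e.g. "global" or "regions/us-east1"
		for _, hc := range scoped.HealthChecks {
			fmt.Printf("%s\t%s\n", scope, hc.Name)
		}
	}
	return nil
}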
-func (c *HealthChecksListCall) Header() http.Header { +func (c *HealthChecksAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -67532,7 +70141,7 @@ func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/healthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/healthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -67545,14 +70154,14 @@ func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.list" call. -// Exactly one of *HealthCheckList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *HealthCheckList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.healthChecks.aggregatedList" call. +// Exactly one of *HealthChecksAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *HealthChecksAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { +func (c *HealthChecksAggregatedListCall) Do(opts ...googleapi.CallOption) (*HealthChecksAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -67571,7 +70180,7 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HealthCheckList{ + ret := &HealthChecksAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -67583,9 +70192,9 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis } return ret, nil // { - // "description": "Retrieves the list of HealthCheck resources available to the specified project.", + // "description": "Retrieves the list of all HealthCheck resources, regional and global, available to the specified project.", // "httpMethod": "GET", - // "id": "compute.healthChecks.list", + // "id": "compute.healthChecks.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -67595,6 +70204,11 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -67614,21 +70228,21 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis // "type": "string" // }, // "project": { - // "description": "Project ID for this request.", + // "description": "Name of the project scoping this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } // }, - // "path": "projects/{project}/global/healthChecks", + // "path": "projects/{project}/aggregated/healthChecks", // "response": { - // "$ref": "HealthCheckList" + // "$ref": "HealthChecksAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -67642,7 +70256,7 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *HealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { +func (c *HealthChecksAggregatedListCall) Pages(ctx context.Context, f func(*HealthChecksAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -67660,26 +70274,25 @@ func (c *HealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckLis } } -// method id "compute.healthChecks.patch": +// method id "compute.healthChecks.delete": -type HealthChecksPatchCall struct { +type HealthChecksDeleteCall struct { s *Service project string healthCheck string - healthcheck *HealthCheck urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Patch: Updates a HealthCheck resource in the specified project using -// the data included in the request. This method supports PATCH -// semantics and uses the JSON merge patch format and processing rules. -func (r *HealthChecksService) Patch(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksPatchCall { - c := &HealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified HealthCheck resource. +// +// - healthCheck: Name of the HealthCheck resource to delete. +// - project: Project ID for this request. +func (r *HealthChecksService) Delete(project string, healthCheck string) *HealthChecksDeleteCall { + c := &HealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.healthCheck = healthCheck - c.healthcheck = healthcheck return c } @@ -67697,7 +70310,7 @@ func (r *HealthChecksService) Patch(project string, healthCheck string, healthch // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HealthChecksPatchCall) RequestId(requestId string) *HealthChecksPatchCall { +func (c *HealthChecksDeleteCall) RequestId(requestId string) *HealthChecksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -67705,7 +70318,7 @@ func (c *HealthChecksPatchCall) RequestId(requestId string) *HealthChecksPatchCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksPatchCall) Fields(s ...googleapi.Field) *HealthChecksPatchCall { +func (c *HealthChecksDeleteCall) Fields(s ...googleapi.Field) *HealthChecksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -67713,38 +70326,33 @@ func (c *HealthChecksPatchCall) Fields(s ...googleapi.Field) *HealthChecksPatchC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksPatchCall) Context(ctx context.Context) *HealthChecksPatchCall { +func (c *HealthChecksDeleteCall) Context(ctx context.Context) *HealthChecksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
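Illustrative only: the request-ID semantics documented above (a retry carrying the same ID is ignored once the original request completed) could be exercised like this. It assumes github.com/google/uuid is available alongside the earlier imports; the resource name is a placeholder.

func deleteHealthCheckIdempotently(ctx context.Context, svc *compute.Service, project, name string) error {
	reqID := uuid.New().String()
	call := func() (*compute.Operation, error) {
		return svc.HealthChecks.Delete(project, name).RequestId(reqID).Context(ctx).Do()
	}
	op, err := call()
	if err != nil {
		// If the first attempt timed out, retrying with the *same* request ID is safe:
		// the server detects the duplicate and will not perform the delete twice.
		op, err = call()
	}
	if err != nil {
		return err
	}
	fmt.Println("operation:", op.Name)
	return nil
}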
-func (c *HealthChecksPatchCall) Header() http.Header { +func (c *HealthChecksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } @@ -67756,14 +70364,789 @@ func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.patch" call. +// Do executes the "compute.healthChecks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified HealthCheck resource.", + // "httpMethod": "DELETE", + // "id": "compute.healthChecks.delete", + // "parameterOrder": [ + // "project", + // "healthCheck" + // ], + // "parameters": { + // "healthCheck": { + // "description": "Name of the HealthCheck resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/healthChecks/{healthCheck}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.healthChecks.get": + +type HealthChecksGetCall struct { + s *Service + project string + healthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified HealthCheck resource. Gets a list of +// available health checks by making a list() request. +// +// - healthCheck: Name of the HealthCheck resource to return. +// - project: Project ID for this request. +func (r *HealthChecksService) Get(project string, healthCheck string) *HealthChecksGetCall { + c := &HealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.healthCheck = healthCheck + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HealthChecksGetCall) Fields(s ...googleapi.Field) *HealthChecksGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *HealthChecksGetCall) IfNoneMatch(entityTag string) *HealthChecksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HealthChecksGetCall) Context(ctx context.Context) *HealthChecksGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
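Illustrative only: the ETag machinery re-documented above (IfNoneMatch plus googleapi.IsNotModified) is meant for cheap change detection. This sketch assumes the google.golang.org/api/googleapi import and that the ETag is read back from the prior response's headers; the header name is assumed.

func healthCheckIfChanged(ctx context.Context, svc *compute.Service, project, name string) error {
	hc, err := svc.HealthChecks.Get(project, name).Context(ctx).Do()
	if err != nil {
		return err
	}
	etag := hc.ServerResponse.Header.Get("Etag") // header name assumed

	// Later on, ask for the resource only if it changed since that ETag.
	_, err = svc.HealthChecks.Get(project, name).IfNoneMatch(etag).Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("health check unchanged")
		return nil
	}
	return err
}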
+func (c *HealthChecksGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/healthChecks/{healthCheck}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "healthCheck": c.healthCheck, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.healthChecks.get" call. +// Exactly one of *HealthCheck or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HealthCheck.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &HealthCheck{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified HealthCheck resource. 
Gets a list of available health checks by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.healthChecks.get", + // "parameterOrder": [ + // "project", + // "healthCheck" + // ], + // "parameters": { + // "healthCheck": { + // "description": "Name of the HealthCheck resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/healthChecks/{healthCheck}", + // "response": { + // "$ref": "HealthCheck" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.healthChecks.insert": + +type HealthChecksInsertCall struct { + s *Service + project string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a HealthCheck resource in the specified project using +// the data included in the request. +// +// - project: Project ID for this request. +func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) *HealthChecksInsertCall { + c := &HealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.healthcheck = healthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HealthChecksInsertCall) RequestId(requestId string) *HealthChecksInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HealthChecksInsertCall) Fields(s ...googleapi.Field) *HealthChecksInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HealthChecksInsertCall) Context(ctx context.Context) *HealthChecksInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
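Illustrative only: an Insert combined with Fields(), the partial-response mechanism referenced throughout this file, so the returned Operation carries only the fields read below. The HealthCheck field names (Type, TcpHealthCheck, Port) are taken from the public compute API and are assumptions here.

func insertTCPHealthCheck(ctx context.Context, svc *compute.Service, project string) error {
	hc := &compute.HealthCheck{
		Name:             "example-tcp-check", // placeholder
		Type:             "TCP",
		TcpHealthCheck:   &compute.TCPHealthCheck{Port: 443},
		CheckIntervalSec: 10,
		TimeoutSec:       5,
	}
	op, err := svc.HealthChecks.Insert(project, hc).
		Fields("name", "status"). // trim the response to what we actually read
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println(op.Name, op.Status)
	return nil
}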
+func (c *HealthChecksInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/healthChecks") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.healthChecks.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.healthChecks.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/healthChecks", + // "request": { + // "$ref": "HealthCheck" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.healthChecks.list": + +type HealthChecksListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of HealthCheck resources available to the +// specified project. +// +// - project: Project ID for this request. +func (r *HealthChecksService) List(project string) *HealthChecksListCall { + c := &HealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *HealthChecksListCall) Filter(filter string) *HealthChecksListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *HealthChecksListCall) MaxResults(maxResults int64) *HealthChecksListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). 
Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *HealthChecksListCall) OrderBy(orderBy string) *HealthChecksListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *HealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *HealthChecksListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HealthChecksListCall) Fields(s ...googleapi.Field) *HealthChecksListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *HealthChecksListCall) IfNoneMatch(entityTag string) *HealthChecksListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HealthChecksListCall) Context(ctx context.Context) *HealthChecksListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *HealthChecksListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/healthChecks") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.healthChecks.list" call. +// Exactly one of *HealthCheckList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HealthCheckList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header.
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &HealthCheckList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of HealthCheck resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.healthChecks.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/global/healthChecks", + // "response": { + // "$ref": "HealthCheckList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *HealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.healthChecks.patch": + +type HealthChecksPatchCall struct { + s *Service + project string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a HealthCheck resource in the specified project using +// the data included in the request. This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. +// +// - healthCheck: Name of the HealthCheck resource to patch. +// - project: Project ID for this request. +func (r *HealthChecksService) Patch(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksPatchCall { + c := &HealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.healthCheck = healthCheck + c.healthcheck = healthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *HealthChecksPatchCall) RequestId(requestId string) *HealthChecksPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HealthChecksPatchCall) Fields(s ...googleapi.Field) *HealthChecksPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HealthChecksPatchCall) Context(ctx context.Context) *HealthChecksPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *HealthChecksPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/healthChecks/{healthCheck}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "healthCheck": c.healthCheck, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.healthChecks.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -67851,6 +71234,9 @@ type HealthChecksUpdateCall struct { // Update: Updates a HealthCheck resource in the specified project using // the data included in the request. +// +// - healthCheck: Name of the HealthCheck resource to update. +// - project: Project ID for this request. 
func (r *HealthChecksService) Update(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksUpdateCall { c := &HealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -67905,7 +71291,7 @@ func (c *HealthChecksUpdateCall) Header() http.Header { func (c *HealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -68025,6 +71411,9 @@ type HttpHealthChecksDeleteCall struct { } // Delete: Deletes the specified HttpHealthCheck resource. +// +// - httpHealthCheck: Name of the HttpHealthCheck resource to delete. +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/delete func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) *HttpHealthChecksDeleteCall { c := &HttpHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -68079,7 +71468,7 @@ func (c *HttpHealthChecksDeleteCall) Header() http.Header { func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -68193,6 +71582,9 @@ type HttpHealthChecksGetCall struct { // Get: Returns the specified HttpHealthCheck resource. Gets a list of // available HTTP health checks by making a list() request. +// +// - httpHealthCheck: Name of the HttpHealthCheck resource to return. +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/get func (r *HttpHealthChecksService) Get(project string, httpHealthCheck string) *HttpHealthChecksGetCall { c := &HttpHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -68238,7 +71630,7 @@ func (c *HttpHealthChecksGetCall) Header() http.Header { func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -68350,6 +71742,8 @@ type HttpHealthChecksInsertCall struct { // Insert: Creates a HttpHealthCheck resource in the specified project // using the data included in the request. +// +// - project: Project ID for this request. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/insert func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksInsertCall { c := &HttpHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -68404,7 +71798,7 @@ func (c *HttpHealthChecksInsertCall) Header() http.Header { func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -68516,6 +71910,8 @@ type HttpHealthChecksListCall struct { // List: Retrieves the list of HttpHealthCheck resources available to // the specified project. +// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/list func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall { c := &HttpHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -68591,7 +71987,7 @@ func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecks // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *HttpHealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *HttpHealthChecksListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -68634,7 +72030,7 @@ func (c *HttpHealthChecksListCall) Header() http.Header { func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -68734,7 +72130,7 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -68788,6 +72184,9 @@ type HttpHealthChecksPatchCall struct { // Patch: Updates a HttpHealthCheck resource in the specified project // using the data included in the request. This method supports PATCH // semantics and uses the JSON merge patch format and processing rules. +// +// - httpHealthCheck: Name of the HttpHealthCheck resource to patch. +// - project: Project ID for this request. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/patch func (r *HttpHealthChecksService) Patch(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksPatchCall { c := &HttpHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -68843,7 +72242,7 @@ func (c *HttpHealthChecksPatchCall) Header() http.Header { func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -68965,6 +72364,9 @@ type HttpHealthChecksUpdateCall struct { // Update: Updates a HttpHealthCheck resource in the specified project // using the data included in the request. +// +// - httpHealthCheck: Name of the HttpHealthCheck resource to update. +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/update func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksUpdateCall { c := &HttpHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -69020,7 +72422,7 @@ func (c *HttpHealthChecksUpdateCall) Header() http.Header { func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69140,6 +72542,9 @@ type HttpsHealthChecksDeleteCall struct { } // Delete: Deletes the specified HttpsHealthCheck resource. +// +// - httpsHealthCheck: Name of the HttpsHealthCheck resource to delete. +// - project: Project ID for this request. func (r *HttpsHealthChecksService) Delete(project string, httpsHealthCheck string) *HttpsHealthChecksDeleteCall { c := &HttpsHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -69193,7 +72598,7 @@ func (c *HttpsHealthChecksDeleteCall) Header() http.Header { func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69307,6 +72712,9 @@ type HttpsHealthChecksGetCall struct { // Get: Returns the specified HttpsHealthCheck resource. Gets a list of // available HTTPS health checks by making a list() request. +// +// - httpsHealthCheck: Name of the HttpsHealthCheck resource to return. +// - project: Project ID for this request. 
func (r *HttpsHealthChecksService) Get(project string, httpsHealthCheck string) *HttpsHealthChecksGetCall { c := &HttpsHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -69351,7 +72759,7 @@ func (c *HttpsHealthChecksGetCall) Header() http.Header { func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69463,6 +72871,8 @@ type HttpsHealthChecksInsertCall struct { // Insert: Creates a HttpsHealthCheck resource in the specified project // using the data included in the request. +// +// - project: Project ID for this request. func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksInsertCall { c := &HttpsHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -69516,7 +72926,7 @@ func (c *HttpsHealthChecksInsertCall) Header() http.Header { func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69628,6 +73038,8 @@ type HttpsHealthChecksListCall struct { // List: Retrieves the list of HttpsHealthCheck resources available to // the specified project. +// +// - project: Project ID for this request. func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCall { c := &HttpsHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -69702,7 +73114,7 @@ func (c *HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChec // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *HttpsHealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *HttpsHealthChecksListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -69745,7 +73157,7 @@ func (c *HttpsHealthChecksListCall) Header() http.Header { func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69845,7 +73257,7 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" // } @@ -69899,6 +73311,9 @@ type HttpsHealthChecksPatchCall struct { // Patch: Updates a HttpsHealthCheck resource in the specified project // using the data included in the request. This method supports PATCH // semantics and uses the JSON merge patch format and processing rules. +// +// - httpsHealthCheck: Name of the HttpsHealthCheck resource to patch. +// - project: Project ID for this request. func (r *HttpsHealthChecksService) Patch(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksPatchCall { c := &HttpsHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -69953,7 +73368,7 @@ func (c *HttpsHealthChecksPatchCall) Header() http.Header { func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70075,6 +73490,9 @@ type HttpsHealthChecksUpdateCall struct { // Update: Updates a HttpsHealthCheck resource in the specified project // using the data included in the request. +// +// - httpsHealthCheck: Name of the HttpsHealthCheck resource to update. +// - project: Project ID for this request. func (r *HttpsHealthChecksService) Update(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksUpdateCall { c := &HttpsHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -70129,7 +73547,7 @@ func (c *HttpsHealthChecksUpdateCall) Header() http.Header { func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70249,6 +73667,9 @@ type ImagesDeleteCall struct { } // Delete: Deletes the specified image. +// +// - image: Name of the image resource to delete. +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/images/delete func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall { c := &ImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -70303,7 +73724,7 @@ func (c *ImagesDeleteCall) Header() http.Header { func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70419,6 +73840,9 @@ type ImagesDeprecateCall struct { // // If an empty request body is given, clears the deprecation status // instead. +// +// - image: Image name. +// - project: Project ID for this request. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/images/deprecate func (r *ImagesService) Deprecate(project string, image string, deprecationstatus *DeprecationStatus) *ImagesDeprecateCall { c := &ImagesDeprecateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -70474,7 +73898,7 @@ func (c *ImagesDeprecateCall) Header() http.Header { func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70596,6 +74020,9 @@ type ImagesGetCall struct { // Get: Returns the specified image. Gets a list of available images by // making a list() request. +// +// - image: Name of the image resource to return. +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/images/get func (r *ImagesService) Get(project string, image string) *ImagesGetCall { c := &ImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -70641,7 +74068,7 @@ func (c *ImagesGetCall) Header() http.Header { func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70754,6 +74181,9 @@ type ImagesGetFromFamilyCall struct { // GetFromFamily: Returns the latest image that is part of an image // family and is not deprecated. +// +// - family: Name of the image family to search for. +// - project: Project ID for this request. func (r *ImagesService) GetFromFamily(project string, family string) *ImagesGetFromFamilyCall { c := &ImagesGetFromFamilyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -70798,7 +74228,7 @@ func (c *ImagesGetFromFamilyCall) Header() http.Header { func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70911,6 +74341,9 @@ type ImagesGetIamPolicyCall struct { // GetIamPolicy: Gets the access control policy for a resource. May be // empty if no such policy or resource exists. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. func (r *ImagesService) GetIamPolicy(project string, resource string) *ImagesGetIamPolicyCall { c := &ImagesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -70962,7 +74395,7 @@ func (c *ImagesGetIamPolicyCall) Header() http.Header { func (c *ImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -71080,6 +74513,8 @@ type ImagesInsertCall struct { // Insert: Creates an image in the specified project using the data // included in the request. 
+// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/images/insert func (r *ImagesService) Insert(project string, image *Image) *ImagesInsertCall { c := &ImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -71141,7 +74576,7 @@ func (c *ImagesInsertCall) Header() http.Header { func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -71266,6 +74701,8 @@ type ImagesListCall struct { // want to get a list of publicly-available images, use this method to // make a request to the respective image project, such as debian-cloud // or windows-cloud. +// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/images/list func (r *ImagesService) List(project string) *ImagesListCall { c := &ImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -71341,7 +74778,7 @@ func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall { // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *ImagesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ImagesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -71384,7 +74821,7 @@ func (c *ImagesListCall) Header() http.Header { func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -71484,7 +74921,7 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -71538,6 +74975,9 @@ type ImagesPatchCall struct { // Patch: Patches the specified image with the data included in the // request. Only the following fields can be modified: family, // description, deprecation status. +// +// - image: Name of the image resource to patch. +// - project: Project ID for this request. 
func (r *ImagesService) Patch(project string, image string, image2 *Image) *ImagesPatchCall { c := &ImagesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -71592,7 +75032,7 @@ func (c *ImagesPatchCall) Header() http.Header { func (c *ImagesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -71714,6 +75154,9 @@ type ImagesSetIamPolicyCall struct { // SetIamPolicy: Sets the access control policy on the specified // resource. Replaces any existing policy. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. func (r *ImagesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *ImagesSetIamPolicyCall { c := &ImagesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -71749,7 +75192,7 @@ func (c *ImagesSetIamPolicyCall) Header() http.Header { func (c *ImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -71866,6 +75309,9 @@ type ImagesSetLabelsCall struct { // SetLabels: Sets the labels on an image. To learn more about labels, // read the Labeling Resources documentation. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. func (r *ImagesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *ImagesSetLabelsCall { c := &ImagesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -71901,7 +75347,7 @@ func (c *ImagesSetLabelsCall) Header() http.Header { func (c *ImagesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72018,6 +75464,9 @@ type ImagesTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. 
func (r *ImagesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ImagesTestIamPermissionsCall { c := &ImagesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -72053,7 +75502,7 @@ func (c *ImagesTestIamPermissionsCall) Header() http.Header { func (c *ImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72187,6 +75636,11 @@ type InstanceGroupManagersAbandonInstancesCall struct { // // You can specify a maximum of 1000 instances with this method per // request. +// +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. func (r *InstanceGroupManagersService) AbandonInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest) *InstanceGroupManagersAbandonInstancesCall { c := &InstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -72242,7 +75696,7 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Header() http.Header { func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72370,6 +75824,8 @@ type InstanceGroupManagersAggregatedListCall struct { // AggregatedList: Retrieves the list of managed instance groups and // groups them by zone. +// +// - project: Project ID for this request. func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceGroupManagersAggregatedListCall { c := &InstanceGroupManagersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -72457,7 +75913,7 @@ func (c *InstanceGroupManagersAggregatedListCall) PageToken(pageToken string) *I // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *InstanceGroupManagersAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupManagersAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -72500,7 +75956,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Header() http.Header { func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72606,7 +76062,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -72661,6 +76117,12 @@ type InstanceGroupManagersApplyUpdatesToInstancesCall struct { // ApplyUpdatesToInstances: Applies changes to selected instances on the // managed instance group. This method can be used to apply new // overrides and/or new versions. +// +// - instanceGroupManager: The name of the managed instance group, +// should conform to RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. Should conform to RFC1035. func (r *InstanceGroupManagersService) ApplyUpdatesToInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersapplyupdatesrequest *InstanceGroupManagersApplyUpdatesRequest) *InstanceGroupManagersApplyUpdatesToInstancesCall { c := &InstanceGroupManagersApplyUpdatesToInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -72697,7 +76159,7 @@ func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Header() http.Header func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72826,6 +76288,12 @@ type InstanceGroupManagersCreateInstancesCall struct { // the createInstances request is successful. The underlying actions // take additional time. You must separately verify the status of the // creating or actions with the listmanagedinstances method. +// +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. It should conform to RFC1035. 
func (r *InstanceGroupManagersService) CreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagerscreateinstancesrequest *InstanceGroupManagersCreateInstancesRequest) *InstanceGroupManagersCreateInstancesCall { c := &InstanceGroupManagersCreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -72880,7 +76348,7 @@ func (c *InstanceGroupManagersCreateInstancesCall) Header() http.Header { func (c *InstanceGroupManagersCreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73011,6 +76479,12 @@ type InstanceGroupManagersDeleteCall struct { // instances in that group. Note that the instance group must not belong // to a backend service. Read Deleting an instance group for more // information. +// +// - instanceGroupManager: The name of the managed instance group to +// delete. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. func (r *InstanceGroupManagersService) Delete(project string, zone string, instanceGroupManager string) *InstanceGroupManagersDeleteCall { c := &InstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -73065,7 +76539,7 @@ func (c *InstanceGroupManagersDeleteCall) Header() http.Header { func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73201,6 +76675,11 @@ type InstanceGroupManagersDeleteInstancesCall struct { // // You can specify a maximum of 1000 instances with this method per // request. +// +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. func (r *InstanceGroupManagersService) DeleteInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest) *InstanceGroupManagersDeleteInstancesCall { c := &InstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -73256,7 +76735,7 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Header() http.Header { func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73386,6 +76865,12 @@ type InstanceGroupManagersDeletePerInstanceConfigsCall struct { // DeletePerInstanceConfigs: Deletes selected per-instance configs for // the managed instance group. +// +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. 
It should conform to RFC1035. func (r *InstanceGroupManagersService) DeletePerInstanceConfigs(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteperinstanceconfigsreq *InstanceGroupManagersDeletePerInstanceConfigsReq) *InstanceGroupManagersDeletePerInstanceConfigsCall { c := &InstanceGroupManagersDeletePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -73422,7 +76907,7 @@ func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Header() http.Header func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73548,6 +77033,11 @@ type InstanceGroupManagersGetCall struct { // Get: Returns all of the details about the specified managed instance // group. Gets a list of available managed instance groups by making a // list() request. +// +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. func (r *InstanceGroupManagersService) Get(project string, zone string, instanceGroupManager string) *InstanceGroupManagersGetCall { c := &InstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -73593,7 +77083,7 @@ func (c *InstanceGroupManagersGetCall) Header() http.Header { func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73721,6 +77211,10 @@ type InstanceGroupManagersInsertCall struct { // // A managed instance group can have up to 1000 VM instances per group. // Please contact Cloud Support if you need an increase in this limit. +// +// - project: Project ID for this request. +// - zone: The name of the zone where you want to create the managed +// instance group. func (r *InstanceGroupManagersService) Insert(project string, zone string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersInsertCall { c := &InstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -73775,7 +77269,7 @@ func (c *InstanceGroupManagersInsertCall) Header() http.Header { func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73896,6 +77390,10 @@ type InstanceGroupManagersListCall struct { // List: Retrieves a list of managed instance groups that are contained // within the specified project and zone. +// +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. 
func (r *InstanceGroupManagersService) List(project string, zone string) *InstanceGroupManagersListCall { c := &InstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -73971,7 +77469,7 @@ func (c *InstanceGroupManagersListCall) PageToken(pageToken string) *InstanceGro // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *InstanceGroupManagersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupManagersListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -74014,7 +77512,7 @@ func (c *InstanceGroupManagersListCall) Header() http.Header { func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -74116,7 +77614,7 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // }, @@ -74177,6 +77675,14 @@ type InstanceGroupManagersListErrorsCall struct { // ListErrors: Lists all errors thrown by actions on instances for a // given managed instance group. The filter and orderBy query parameters // are not supported. +// +// - instanceGroupManager: The name of the managed instance group. It +// must be a string that meets the requirements in RFC1035, or an +// unsigned long integer: must match regexp pattern: (?:a-z +// (?:[-a-z0-9]{0,61}[a-z0-9])?)|[1-9][0-9]{0,19}. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. It should conform to RFC1035. func (r *InstanceGroupManagersService) ListErrors(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListErrorsCall { c := &InstanceGroupManagersListErrorsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -74253,7 +77759,7 @@ func (c *InstanceGroupManagersListErrorsCall) PageToken(pageToken string) *Insta // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *InstanceGroupManagersListErrorsCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupManagersListErrorsCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -74296,7 +77802,7 @@ func (c *InstanceGroupManagersListErrorsCall) Header() http.Header { func (c *InstanceGroupManagersListErrorsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -74407,7 +77913,7 @@ func (c *InstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // }, @@ -74471,6 +77977,11 @@ type InstanceGroupManagersListManagedInstancesCall struct { // instance, the currentAction is CREATING. If a previous action failed, // the list displays the errors for that failed action. The orderBy // query parameter is not supported. +// +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListManagedInstancesCall { c := &InstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -74547,7 +78058,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) PageToken(pageToken stri // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *InstanceGroupManagersListManagedInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupManagersListManagedInstancesCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -74580,7 +78091,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Header() http.Header { func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -74689,7 +78200,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" // }, @@ -74749,6 +78260,12 @@ type InstanceGroupManagersListPerInstanceConfigsCall struct { // ListPerInstanceConfigs: Lists all of the per-instance configs defined // for the managed instance group. The orderBy query parameter is not // supported. +// +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. It should conform to RFC1035. func (r *InstanceGroupManagersService) ListPerInstanceConfigs(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListPerInstanceConfigsCall { c := &InstanceGroupManagersListPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -74825,7 +78342,7 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) PageToken(pageToken st // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *InstanceGroupManagersListPerInstanceConfigsCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -74858,7 +78375,7 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) Header() http.Header { func (c *InstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -74967,7 +78484,7 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.C // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // }, @@ -75032,6 +78549,11 @@ type InstanceGroupManagersPatchCall struct { // the individual instances with the listManagedInstances method. This // method supports PATCH semantics and uses the JSON merge patch format // and processing rules. +// +// - instanceGroupManager: The name of the instance group manager. +// - project: Project ID for this request. +// - zone: The name of the zone where you want to create the managed +// instance group. 
func (r *InstanceGroupManagersService) Patch(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersPatchCall { c := &InstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -75087,7 +78609,7 @@ func (c *InstanceGroupManagersPatchCall) Header() http.Header { func (c *InstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75218,6 +78740,12 @@ type InstanceGroupManagersPatchPerInstanceConfigsCall struct { // PatchPerInstanceConfigs: Inserts or patches per-instance configs for // the managed instance group. perInstanceConfig.name serves as a key // used to distinguish whether to perform insert or patch. +// +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. It should conform to RFC1035. func (r *InstanceGroupManagersService) PatchPerInstanceConfigs(project string, zone string, instanceGroupManager string, instancegroupmanagerspatchperinstanceconfigsreq *InstanceGroupManagersPatchPerInstanceConfigsReq) *InstanceGroupManagersPatchPerInstanceConfigsCall { c := &InstanceGroupManagersPatchPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -75273,7 +78801,7 @@ func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) Header() http.Header func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75401,13 +78929,13 @@ type InstanceGroupManagersRecreateInstancesCall struct { header_ http.Header } -// RecreateInstances: Flags the specified instances in the managed -// instance group to be immediately recreated. The instances are deleted -// and recreated using the current instance template for the managed -// instance group. This operation is marked as DONE when the flag is set -// even if the instances have not yet been recreated. You must -// separately verify the status of the recreating action with the -// listmanagedinstances method. +// RecreateInstances: Flags the specified VM instances in the managed +// instance group to be immediately recreated. Each instance is +// recreated using the group's current configuration. This operation is +// marked as DONE when the flag is set even if the instances have not +// yet been recreated. You must separately verify the status of each +// instance by checking its currentAction field; for more information, +// see Checking the status of managed instances. // // If the group is part of a backend service that has enabled connection // draining, it can take up to 60 seconds after the connection draining @@ -75416,6 +78944,11 @@ type InstanceGroupManagersRecreateInstancesCall struct { // // You can specify a maximum of 1000 instances with this method per // request. +// +// - instanceGroupManager: The name of the managed instance group. 
+// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. func (r *InstanceGroupManagersService) RecreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest) *InstanceGroupManagersRecreateInstancesCall { c := &InstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -75471,7 +79004,7 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Header() http.Header { func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75537,7 +79070,7 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "description": "Flags the specified VM instances in the managed instance group to be immediately recreated. Each instance is recreated using the group's current configuration. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of each instance by checking its currentAction field; for more information, see Checking the status of managed instances.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", // "httpMethod": "POST", // "id": "compute.instanceGroupManagers.recreateInstances", // "parameterOrder": [ @@ -75619,6 +79152,15 @@ type InstanceGroupManagersResizeCall struct { // If the group is part of a backend service that has enabled connection // draining, it can take up to 60 seconds after the connection draining // duration has elapsed before the VM instance is removed or deleted. +// +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - size: The number of running instances that the managed instance +// group should maintain at any given time. The group automatically +// adds or removes instances to maintain the number of instances +// specified by this parameter. +// - zone: The name of the zone where the managed instance group is +// located. 
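The reworded RecreateInstances comment above points callers at each instance's currentAction field rather than a separate recreate status. A hedged sketch of that flow, assuming the compute/v1 import path and hypothetical MIG and instance names:

```go
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical names, for illustration only.
	project, zone, mig := "my-project", "us-central1-a", "example-mig"

	// Flag one instance for recreation; the operation reports DONE as soon
	// as the flag is set, not when the instance has been recreated.
	op, err := svc.InstanceGroupManagers.RecreateInstances(project, zone, mig,
		&compute.InstanceGroupManagersRecreateInstancesRequest{
			Instances: []string{"zones/us-central1-a/instances/example-vm-0"},
		}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("operation:", op.Name, "status:", op.Status)

	// Verify progress per instance via its currentAction field, as the
	// updated method description suggests.
	resp, err := svc.InstanceGroupManagers.ListManagedInstances(project, zone, mig).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, mi := range resp.ManagedInstances {
		fmt.Println(mi.Instance, mi.CurrentAction)
	}
}
```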
func (r *InstanceGroupManagersService) Resize(project string, zone string, instanceGroupManager string, size int64) *InstanceGroupManagersResizeCall { c := &InstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -75674,7 +79216,7 @@ func (c *InstanceGroupManagersResizeCall) Header() http.Header { func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75807,6 +79349,11 @@ type InstanceGroupManagersSetInstanceTemplateCall struct { // instances in the group do not change unless you run // recreateInstances, run applyUpdatesToInstances, or set the group's // updatePolicy.type to PROACTIVE. +// +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone string, instanceGroupManager string, instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest) *InstanceGroupManagersSetInstanceTemplateCall { c := &InstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -75862,7 +79409,7 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75997,6 +79544,11 @@ type InstanceGroupManagersSetTargetPoolsCall struct { // if the instances have not yet been added to their target pools. The // change might take some time to apply to all of the instances in the // group depending on the size of the group. +// +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. func (r *InstanceGroupManagersService) SetTargetPools(project string, zone string, instanceGroupManager string, instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest) *InstanceGroupManagersSetTargetPoolsCall { c := &InstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -76052,7 +79604,7 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Header() http.Header { func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -76183,6 +79735,12 @@ type InstanceGroupManagersUpdatePerInstanceConfigsCall struct { // UpdatePerInstanceConfigs: Inserts or updates per-instance configs for // the managed instance group. perInstanceConfig.name serves as a key // used to distinguish whether to perform insert or patch. 
+// +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. It should conform to RFC1035. func (r *InstanceGroupManagersService) UpdatePerInstanceConfigs(project string, zone string, instanceGroupManager string, instancegroupmanagersupdateperinstanceconfigsreq *InstanceGroupManagersUpdatePerInstanceConfigsReq) *InstanceGroupManagersUpdatePerInstanceConfigsCall { c := &InstanceGroupManagersUpdatePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -76238,7 +79796,7 @@ func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Header() http.Header func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -76369,6 +79927,11 @@ type InstanceGroupsAddInstancesCall struct { // AddInstances: Adds a list of instances to the specified instance // group. All of the instances in the instance group must be in the same // network/subnetwork. Read Adding instances for more information. +// +// - instanceGroup: The name of the instance group where you are adding +// instances. +// - project: Project ID for this request. +// - zone: The name of the zone where the instance group is located. func (r *InstanceGroupsService) AddInstances(project string, zone string, instanceGroup string, instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest) *InstanceGroupsAddInstancesCall { c := &InstanceGroupsAddInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -76424,7 +79987,7 @@ func (c *InstanceGroupsAddInstancesCall) Header() http.Header { func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -76552,6 +80115,8 @@ type InstanceGroupsAggregatedListCall struct { // AggregatedList: Retrieves the list of instance groups and sorts them // by zone. +// +// - project: Project ID for this request. func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAggregatedListCall { c := &InstanceGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -76639,7 +80204,7 @@ func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *Instance // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *InstanceGroupsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -76682,7 +80247,7 @@ func (c *InstanceGroupsAggregatedListCall) Header() http.Header { func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -76787,7 +80352,7 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -76842,6 +80407,10 @@ type InstanceGroupsDeleteCall struct { // group are not deleted. Note that instance group must not belong to a // backend service. Read Deleting an instance group for more // information. +// +// - instanceGroup: The name of the instance group to delete. +// - project: Project ID for this request. +// - zone: The name of the zone where the instance group is located. func (r *InstanceGroupsService) Delete(project string, zone string, instanceGroup string) *InstanceGroupsDeleteCall { c := &InstanceGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -76896,7 +80465,7 @@ func (c *InstanceGroupsDeleteCall) Header() http.Header { func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77021,6 +80590,10 @@ type InstanceGroupsGetCall struct { // // For managed instance groups, use the instanceGroupManagers or // regionInstanceGroupManagers methods instead. +// +// - instanceGroup: The name of the instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the instance group is located. func (r *InstanceGroupsService) Get(project string, zone string, instanceGroup string) *InstanceGroupsGetCall { c := &InstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -77066,7 +80639,7 @@ func (c *InstanceGroupsGetCall) Header() http.Header { func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77186,6 +80759,10 @@ type InstanceGroupsInsertCall struct { // Insert: Creates an instance group in the specified project using the // parameters that are included in the request. +// +// - project: Project ID for this request. +// - zone: The name of the zone where you want to create the instance +// group. 
func (r *InstanceGroupsService) Insert(project string, zone string, instancegroup *InstanceGroup) *InstanceGroupsInsertCall { c := &InstanceGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -77240,7 +80817,7 @@ func (c *InstanceGroupsInsertCall) Header() http.Header { func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77364,6 +80941,9 @@ type InstanceGroupsListCall struct { // // For managed instance groups, use the instanceGroupManagers or // regionInstanceGroupManagers methods instead. +// +// - project: Project ID for this request. +// - zone: The name of the zone where the instance group is located. func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroupsListCall { c := &InstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -77439,7 +81019,7 @@ func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsList // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *InstanceGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -77482,7 +81062,7 @@ func (c *InstanceGroupsListCall) Header() http.Header { func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77584,7 +81164,7 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // }, @@ -77644,6 +81224,11 @@ type InstanceGroupsListInstancesCall struct { // ListInstances: Lists the instances in the specified instance group. // The orderBy query parameter is not supported. +// +// - instanceGroup: The name of the instance group from which you want +// to generate a list of included instances. +// - project: Project ID for this request. +// - zone: The name of the zone where the instance group is located. 
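For the ListInstances method documented above, the request body selects which instance states to return. A small usage sketch under the same assumptions (compute/v1 import, hypothetical names):

```go
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical names, for illustration only.
	project, zone, group := "my-project", "us-central1-a", "example-group"

	// The request body filters by instance state ("ALL" or "RUNNING").
	req := &compute.InstanceGroupsListInstancesRequest{InstanceState: "RUNNING"}

	err = svc.InstanceGroups.ListInstances(project, zone, group, req).
		Pages(ctx, func(page *compute.InstanceGroupsListInstances) error {
			for _, item := range page.Items {
				fmt.Println(item.Instance, item.Status)
			}
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}
}
```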
func (r *InstanceGroupsService) ListInstances(project string, zone string, instanceGroup string, instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest) *InstanceGroupsListInstancesCall { c := &InstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -77721,7 +81306,7 @@ func (c *InstanceGroupsListInstancesCall) PageToken(pageToken string) *InstanceG // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *InstanceGroupsListInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupsListInstancesCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -77754,7 +81339,7 @@ func (c *InstanceGroupsListInstancesCall) Header() http.Header { func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77866,7 +81451,7 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // }, @@ -77933,6 +81518,11 @@ type InstanceGroupsRemoveInstancesCall struct { // If the group is part of a backend service that has enabled connection // draining, it can take up to 60 seconds after the connection draining // duration before the VM instance is removed or deleted. +// +// - instanceGroup: The name of the instance group where the specified +// instances will be removed. +// - project: Project ID for this request. +// - zone: The name of the zone where the instance group is located. func (r *InstanceGroupsService) RemoveInstances(project string, zone string, instanceGroup string, instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest) *InstanceGroupsRemoveInstancesCall { c := &InstanceGroupsRemoveInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -77988,7 +81578,7 @@ func (c *InstanceGroupsRemoveInstancesCall) Header() http.Header { func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -78117,6 +81707,11 @@ type InstanceGroupsSetNamedPortsCall struct { } // SetNamedPorts: Sets the named ports for the specified instance group. +// +// - instanceGroup: The name of the instance group where the named ports +// are updated. +// - project: Project ID for this request. +// - zone: The name of the zone where the instance group is located. 
func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, instanceGroup string, instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest) *InstanceGroupsSetNamedPortsCall { c := &InstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -78172,7 +81767,7 @@ func (c *InstanceGroupsSetNamedPortsCall) Header() http.Header { func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -78301,6 +81896,9 @@ type InstanceTemplatesDeleteCall struct { // Delete: Deletes the specified instance template. Deleting an instance // template is permanent and cannot be undone. It is not possible to // delete templates that are already in use by a managed instance group. +// +// - instanceTemplate: The name of the instance template to delete. +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/delete func (r *InstanceTemplatesService) Delete(project string, instanceTemplate string) *InstanceTemplatesDeleteCall { c := &InstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -78355,7 +81953,7 @@ func (c *InstanceTemplatesDeleteCall) Header() http.Header { func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -78469,6 +82067,9 @@ type InstanceTemplatesGetCall struct { // Get: Returns the specified instance template. Gets a list of // available instance templates by making a list() request. +// +// - instanceTemplate: The name of the instance template. +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/get func (r *InstanceTemplatesService) Get(project string, instanceTemplate string) *InstanceTemplatesGetCall { c := &InstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -78514,7 +82115,7 @@ func (c *InstanceTemplatesGetCall) Header() http.Header { func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -78627,6 +82228,9 @@ type InstanceTemplatesGetIamPolicyCall struct { // GetIamPolicy: Gets the access control policy for a resource. May be // empty if no such policy or resource exists. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. 
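The IAM helpers on instance templates documented above follow the client's usual Get/Set/Test pattern. A tentative sketch, assuming compute/v1 and a hypothetical template name; the permission string is illustrative only:

```go
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical names, for illustration only.
	project, template := "my-project", "example-template"

	// Read the current policy bindings on the template.
	policy, err := svc.InstanceTemplates.GetIamPolicy(project, template).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range policy.Bindings {
		fmt.Println(b.Role, b.Members)
	}

	// Check which of the requested permissions the caller actually holds.
	perms, err := svc.InstanceTemplates.TestIamPermissions(project, template,
		&compute.TestPermissionsRequest{
			Permissions: []string{"compute.instanceTemplates.get"},
		}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("granted:", perms.Permissions)
}
```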
func (r *InstanceTemplatesService) GetIamPolicy(project string, resource string) *InstanceTemplatesGetIamPolicyCall { c := &InstanceTemplatesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -78678,7 +82282,7 @@ func (c *InstanceTemplatesGetIamPolicyCall) Header() http.Header { func (c *InstanceTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -78799,6 +82403,8 @@ type InstanceTemplatesInsertCall struct { // template to update an existing instance group, your new instance // template must use the same network or, if applicable, the same // subnetwork as the original template. +// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/insert func (r *InstanceTemplatesService) Insert(project string, instancetemplate *InstanceTemplate) *InstanceTemplatesInsertCall { c := &InstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -78853,7 +82459,7 @@ func (c *InstanceTemplatesInsertCall) Header() http.Header { func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -78965,6 +82571,8 @@ type InstanceTemplatesListCall struct { // List: Retrieves a list of instance templates that are contained // within the specified project. +// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/list func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCall { c := &InstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -79040,7 +82648,7 @@ func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplat // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *InstanceTemplatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceTemplatesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -79083,7 +82691,7 @@ func (c *InstanceTemplatesListCall) Header() http.Header { func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79183,7 +82791,7 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -79236,6 +82844,9 @@ type InstanceTemplatesSetIamPolicyCall struct { // SetIamPolicy: Sets the access control policy on the specified // resource. Replaces any existing policy. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. func (r *InstanceTemplatesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *InstanceTemplatesSetIamPolicyCall { c := &InstanceTemplatesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -79271,7 +82882,7 @@ func (c *InstanceTemplatesSetIamPolicyCall) Header() http.Header { func (c *InstanceTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79388,6 +82999,9 @@ type InstanceTemplatesTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. func (r *InstanceTemplatesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceTemplatesTestIamPermissionsCall { c := &InstanceTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -79423,7 +83037,7 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Header() http.Header { func (c *InstanceTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79542,6 +83156,12 @@ type InstancesAddAccessConfigCall struct { // AddAccessConfig: Adds an access config to an instance's network // interface. +// +// - instance: The instance name for this request. +// - networkInterface: The name of the network interface to add to this +// instance. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/addAccessConfig func (r *InstancesService) AddAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesAddAccessConfigCall { c := &InstancesAddAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -79599,7 +83219,7 @@ func (c *InstancesAddAccessConfigCall) Header() http.Header { func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79739,6 +83359,10 @@ type InstancesAddResourcePoliciesCall struct { // AddResourcePolicies: Adds existing resource policies to an instance. // You can only add one policy right now which will be applied to this // instance for scheduling live migrations. +// +// - instance: The instance name for this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *InstancesService) AddResourcePolicies(project string, zone string, instance string, instancesaddresourcepoliciesrequest *InstancesAddResourcePoliciesRequest) *InstancesAddResourcePoliciesCall { c := &InstancesAddResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -79794,7 +83418,7 @@ func (c *InstancesAddResourcePoliciesCall) Header() http.Header { func (c *InstancesAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79924,6 +83548,8 @@ type InstancesAggregatedListCall struct { // AggregatedList: Retrieves aggregated list of all of the instances in // your project across all regions and zones. +// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/aggregatedList func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedListCall { c := &InstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -80012,7 +83638,7 @@ func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggr // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *InstancesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstancesAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -80055,7 +83681,7 @@ func (c *InstancesAggregatedListCall) Header() http.Header { func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -80160,7 +83786,7 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -80216,6 +83842,10 @@ type InstancesAttachDiskCall struct { // must first create the disk before you can attach it. It is not // possible to create and attach a disk at the same time. For more // information, read Adding a persistent disk to your instance. +// +// - instance: The instance name for this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/attachDisk func (r *InstancesService) AttachDisk(project string, zone string, instance string, attacheddisk *AttachedDisk) *InstancesAttachDiskCall { c := &InstancesAttachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -80281,7 +83911,7 @@ func (c *InstancesAttachDiskCall) Header() http.Header { func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -80403,26 +84033,28 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.instances.delete": +// method id "compute.instances.bulkInsert": -type InstancesDeleteCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesBulkInsertCall struct { + s *Service + project string + zone string + bulkinsertinstanceresource *BulkInsertInstanceResource + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified Instance resource. For more -// information, see Stopping or Deleting an Instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/delete -func (r *InstancesService) Delete(project string, zone string, instance string) *InstancesDeleteCall { - c := &InstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// BulkInsert: Creates multiple instances. Count specifies the number of +// instances to create. +// +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
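bulkInsert is the method newly added by this regeneration: one call that creates Count instances in a zone. A sketch of how it might be invoked; the import path is assumed to be compute/v1, resource names are hypothetical, and fields other than Count are assumptions based on the public BulkInsertInstanceResource schema:

```go
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical request; NamePattern and SourceInstanceTemplate are
	// assumptions and may differ in this vendored version of the client.
	res := &compute.BulkInsertInstanceResource{
		Count:                  3,
		NamePattern:            "worker-####",
		SourceInstanceTemplate: "global/instanceTemplates/example-template",
	}

	op, err := svc.Instances.BulkInsert("my-project", "us-central1-a", res).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("bulk insert operation:", op.Name, "status:", op.Status)
}
```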
+func (r *InstancesService) BulkInsert(project string, zone string, bulkinsertinstanceresource *BulkInsertInstanceResource) *InstancesBulkInsertCall { + c := &InstancesBulkInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance + c.bulkinsertinstanceresource = bulkinsertinstanceresource return c } @@ -80440,7 +84072,7 @@ func (r *InstancesService) Delete(project string, zone string, instance string) // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesDeleteCall) RequestId(requestId string) *InstancesDeleteCall { +func (c *InstancesBulkInsertCall) RequestId(requestId string) *InstancesBulkInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -80448,7 +84080,7 @@ func (c *InstancesDeleteCall) RequestId(requestId string) *InstancesDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall { +func (c *InstancesBulkInsertCall) Fields(s ...googleapi.Field) *InstancesBulkInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -80456,211 +84088,36 @@ func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesDeleteCall) Context(ctx context.Context) *InstancesDeleteCall { +func (c *InstancesBulkInsertCall) Context(ctx context.Context) *InstancesBulkInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesDeleteCall) Header() http.Header { +func (c *InstancesBulkInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesBulkInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bulkinsertinstanceresource) if err != nil { return nil, err } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified Instance resource. For more information, see Stopping or Deleting an Instance.", - // "httpMethod": "DELETE", - // "id": "compute.instances.delete", - // "parameterOrder": [ - // "project", - // "zone", - // "instance" - // ], - // "parameters": { - // "instance": { - // "description": "Name of the instance resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instances.deleteAccessConfig": - -type InstancesDeleteAccessConfigCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// DeleteAccessConfig: Deletes an access config from an instance's -// network interface. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/deleteAccessConfig -func (r *InstancesService) DeleteAccessConfig(project string, zone string, instance string, accessConfig string, networkInterface string) *InstancesDeleteAccessConfigCall { - c := &InstancesDeleteAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("accessConfig", accessConfig) - c.urlParams_.Set("networkInterface", networkInterface) - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesDeleteAccessConfigCall) RequestId(requestId string) *InstancesDeleteAccessConfigCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstancesDeleteAccessConfigCall) Fields(s ...googleapi.Field) *InstancesDeleteAccessConfigCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstancesDeleteAccessConfigCall) Context(ctx context.Context) *InstancesDeleteAccessConfigCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstancesDeleteAccessConfigCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/deleteAccessConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/bulkInsert") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -80668,21 +84125,383 @@ func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.deleteAccessConfig" call. +// Do executes the "compute.instances.bulkInsert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates multiple instances. Count specifies the number of instances to create.", + // "httpMethod": "POST", + // "id": "compute.instances.bulkInsert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/instances/bulkInsert", + // "request": { + // "$ref": "BulkInsertInstanceResource" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.delete": + +type InstancesDeleteCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified Instance resource. For more +// information, see Deleting an instance. +// +// - instance: Name of the instance resource to delete. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/delete +func (r *InstancesService) Delete(project string, zone string, instance string) *InstancesDeleteCall { + c := &InstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesDeleteCall) RequestId(requestId string) *InstancesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesDeleteCall) Context(ctx context.Context) *InstancesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
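// Minimal usage sketches, assuming a *Service constructed elsewhere (for
// example via NewService) and placeholder project/zone/instance values; the
// BulkInsertInstanceResource field names used below are assumptions based on
// the request schema referenced by the bulkInsert metadata above.

// exampleInstancesBulkInsert creates several instances with one
// compute.instances.bulkInsert call and returns the zonal Operation to poll.
func exampleInstancesBulkInsert(ctx context.Context, svc *Service) (*Operation, error) {
	req := &BulkInsertInstanceResource{
		Count:                  3,                                           // number of instances to create
		SourceInstanceTemplate: "global/instanceTemplates/example-template", // hypothetical template
	}
	return svc.Instances.BulkInsert("example-project", "us-central1-a", req).Context(ctx).Do()
}

// exampleInstancesDeleteIdempotent applies the RequestId guidance above: if
// the call times out, retrying with the same non-zero UUID lets the server
// detect and ignore the duplicate delete.
func exampleInstancesDeleteIdempotent(ctx context.Context, svc *Service) (*Operation, error) {
	const requestID = "7f9c2a5e-4b1d-4d2a-9a51-0f0e1d2c3b4a" // hypothetical UUID
	return svc.Instances.Delete("example-project", "us-central1-a", "example-instance").
		RequestId(requestID).
		Context(ctx).
		Do()
}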
+func (c *InstancesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified Instance resource. For more information, see Deleting an instance.", + // "httpMethod": "DELETE", + // "id": "compute.instances.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/instances/{instance}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.deleteAccessConfig": + +type InstancesDeleteAccessConfigCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DeleteAccessConfig: Deletes an access config from an instance's +// network interface. +// +// - accessConfig: The name of the access config to delete. +// - instance: The instance name for this request. +// - networkInterface: The name of the network interface. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/deleteAccessConfig +func (r *InstancesService) DeleteAccessConfig(project string, zone string, instance string, accessConfig string, networkInterface string) *InstancesDeleteAccessConfigCall { + c := &InstancesDeleteAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.urlParams_.Set("accessConfig", accessConfig) + c.urlParams_.Set("networkInterface", networkInterface) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesDeleteAccessConfigCall) RequestId(requestId string) *InstancesDeleteAccessConfigCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesDeleteAccessConfigCall) Fields(s ...googleapi.Field) *InstancesDeleteAccessConfigCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *InstancesDeleteAccessConfigCall) Context(ctx context.Context) *InstancesDeleteAccessConfigCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesDeleteAccessConfigCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/deleteAccessConfig") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.deleteAccessConfig" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -80788,6 +84607,13 @@ type InstancesDetachDiskCall struct { } // DetachDisk: Detaches a disk from an instance. +// +// - deviceName: The device name of the disk to detach. Make a get() +// request on the instance to view currently attached disks and device +// names. +// - instance: Instance name for this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/detachDisk func (r *InstancesService) DetachDisk(project string, zone string, instance string, deviceName string) *InstancesDetachDiskCall { c := &InstancesDetachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -80844,7 +84670,7 @@ func (c *InstancesDetachDiskCall) Header() http.Header { func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -80975,6 +84801,10 @@ type InstancesGetCall struct { // Get: Returns the specified Instance resource. Gets a list of // available instances by making a list() request. +// +// - instance: Name of the instance resource to return. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/get func (r *InstancesService) Get(project string, zone string, instance string) *InstancesGetCall { c := &InstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -81021,7 +84851,7 @@ func (c *InstancesGetCall) Header() http.Header { func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81129,6 +84959,189 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { } +// method id "compute.instances.getEffectiveFirewalls": + +type InstancesGetEffectiveFirewallsCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetEffectiveFirewalls: Returns effective firewalls applied to an +// interface of the instance. +// +// - instance: Name of the instance scoping this request. +// - networkInterface: The name of the network interface to get the +// effective firewalls. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +func (r *InstancesService) GetEffectiveFirewalls(project string, zone string, instance string, networkInterface string) *InstancesGetEffectiveFirewallsCall { + c := &InstancesGetEffectiveFirewallsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.urlParams_.Set("networkInterface", networkInterface) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesGetEffectiveFirewallsCall) Fields(s ...googleapi.Field) *InstancesGetEffectiveFirewallsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetEffectiveFirewallsCall) IfNoneMatch(entityTag string) *InstancesGetEffectiveFirewallsCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesGetEffectiveFirewallsCall) Context(ctx context.Context) *InstancesGetEffectiveFirewallsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
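// Minimal sketch of the If-None-Match flow for the new getEffectiveFirewalls
// call, assuming a *Service constructed elsewhere and a previously recorded
// ETag; names are placeholders. googleapi.IsNotModified distinguishes
// "unchanged" from a real error, as the doc comment above describes.
func exampleGetEffectiveFirewallsCached(ctx context.Context, svc *Service, lastETag string) (*InstancesGetEffectiveFirewallsResponse, error) {
	resp, err := svc.Instances.
		GetEffectiveFirewalls("example-project", "us-central1-a", "example-instance", "nic0").
		IfNoneMatch(lastETag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		// Nothing changed since lastETag was captured; keep the cached copy.
		return nil, nil
	}
	return resp, err
}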
+func (c *InstancesGetEffectiveFirewallsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesGetEffectiveFirewallsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/getEffectiveFirewalls") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.getEffectiveFirewalls" call. +// Exactly one of *InstancesGetEffectiveFirewallsResponse or error will +// be non-nil. Any non-2xx status code is an error. Response headers are +// in either +// *InstancesGetEffectiveFirewallsResponse.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesGetEffectiveFirewallsCall) Do(opts ...googleapi.CallOption) (*InstancesGetEffectiveFirewallsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstancesGetEffectiveFirewallsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns effective firewalls applied to an interface of the instance.", + // "httpMethod": "GET", + // "id": "compute.instances.getEffectiveFirewalls", + // "parameterOrder": [ + // "project", + // "zone", + // "instance", + // "networkInterface" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "networkInterface": { + // "description": "The name of the network interface to get the effective firewalls.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/instances/{instance}/getEffectiveFirewalls", + // "response": { + // "$ref": "InstancesGetEffectiveFirewallsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.instances.getGuestAttributes": type InstancesGetGuestAttributesCall struct { @@ -81143,6 +85156,10 @@ type InstancesGetGuestAttributesCall struct { } // GetGuestAttributes: Returns the specified guest attributes entry. +// +// - instance: Name of the instance scoping this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *InstancesService) GetGuestAttributes(project string, zone string, instance string) *InstancesGetGuestAttributesCall { c := &InstancesGetGuestAttributesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -81202,7 +85219,7 @@ func (c *InstancesGetGuestAttributesCall) Header() http.Header { func (c *InstancesGetGuestAttributesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81335,6 +85352,10 @@ type InstancesGetIamPolicyCall struct { // GetIamPolicy: Gets the access control policy for a resource. May be // empty if no such policy or resource exists. +// +// - project: Project ID for this request. 
+// - resource: Name or id of the resource for this request. +// - zone: The name of the zone for this request. func (r *InstancesService) GetIamPolicy(project string, zone string, resource string) *InstancesGetIamPolicyCall { c := &InstancesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -81387,7 +85408,7 @@ func (c *InstancesGetIamPolicyCall) Header() http.Header { func (c *InstancesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81515,6 +85536,10 @@ type InstancesGetScreenshotCall struct { } // GetScreenshot: Returns the screenshot from the specified instance. +// +// - instance: Name of the instance scoping this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *InstancesService) GetScreenshot(project string, zone string, instance string) *InstancesGetScreenshotCall { c := &InstancesGetScreenshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -81560,7 +85585,7 @@ func (c *InstancesGetScreenshotCall) Header() http.Header { func (c *InstancesGetScreenshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81683,6 +85708,10 @@ type InstancesGetSerialPortOutputCall struct { // GetSerialPortOutput: Returns the last 1 MB of serial port output from // the specified instance. +// +// - instance: Name of the instance for this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/getSerialPortOutput func (r *InstancesService) GetSerialPortOutput(project string, zone string, instance string) *InstancesGetSerialPortOutputCall { c := &InstancesGetSerialPortOutputCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -81758,7 +85787,7 @@ func (c *InstancesGetSerialPortOutputCall) Header() http.Header { func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81896,6 +85925,10 @@ type InstancesGetShieldedInstanceIdentityCall struct { // GetShieldedInstanceIdentity: Returns the Shielded Instance Identity // of an instance +// +// - instance: Name or id of the instance scoping this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
func (r *InstancesService) GetShieldedInstanceIdentity(project string, zone string, instance string) *InstancesGetShieldedInstanceIdentityCall { c := &InstancesGetShieldedInstanceIdentityCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -81941,7 +85974,7 @@ func (c *InstancesGetShieldedInstanceIdentityCall) Header() http.Header { func (c *InstancesGetShieldedInstanceIdentityCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -82063,6 +86096,9 @@ type InstancesInsertCall struct { // Insert: Creates an instance resource in the specified project using // the data included in the request. +// +// - project: Project ID for this request. +// - zone: The name of the zone for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/insert func (r *InstancesService) Insert(project string, zone string, instance *Instance) *InstancesInsertCall { c := &InstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -82134,7 +86170,7 @@ func (c *InstancesInsertCall) Header() http.Header { func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -82261,6 +86297,9 @@ type InstancesListCall struct { // List: Retrieves the list of instances contained within the specified // zone. +// +// - project: Project ID for this request. +// - zone: The name of the zone for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/list func (r *InstancesService) List(project string, zone string) *InstancesListCall { c := &InstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -82337,7 +86376,7 @@ func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *InstancesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstancesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -82380,7 +86419,7 @@ func (c *InstancesListCall) Header() http.Header { func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -82482,7 +86521,7 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" // }, @@ -82546,6 +86585,11 @@ type InstancesListReferrersCall struct { // part of a managed or unmanaged instance group, the referrers list // includes the instance group. For more information, read Viewing // referrers to VM instances. +// +// - instance: Name of the target instance scoping this request, or '-' +// if the request should span over all instances in the container. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *InstancesService) ListReferrers(project string, zone string, instance string) *InstancesListReferrersCall { c := &InstancesListReferrersCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -82622,7 +86666,7 @@ func (c *InstancesListReferrersCall) PageToken(pageToken string) *InstancesListR // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *InstancesListReferrersCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstancesListReferrersCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -82665,7 +86709,7 @@ func (c *InstancesListReferrersCall) Header() http.Header { func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -82776,7 +86820,7 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // }, @@ -82836,6 +86880,10 @@ type InstancesRemoveResourcePoliciesCall struct { } // RemoveResourcePolicies: Removes resource policies from an instance. +// +// - instance: The instance name for this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *InstancesService) RemoveResourcePolicies(project string, zone string, instance string, instancesremoveresourcepoliciesrequest *InstancesRemoveResourcePoliciesRequest) *InstancesRemoveResourcePoliciesCall { c := &InstancesRemoveResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -82891,7 +86939,7 @@ func (c *InstancesRemoveResourcePoliciesCall) Header() http.Header { func (c *InstancesRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83023,6 +87071,10 @@ type InstancesResetCall struct { // Reset: Performs a reset on the instance. This is a hard reset the VM // does not do a graceful shutdown. 
For more information, see Resetting // an instance. +// +// - instance: Name of the instance scoping this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/reset func (r *InstancesService) Reset(project string, zone string, instance string) *InstancesResetCall { c := &InstancesResetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -83078,7 +87130,7 @@ func (c *InstancesResetCall) Header() http.Header { func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83200,6 +87252,10 @@ type InstancesSetDeletionProtectionCall struct { } // SetDeletionProtection: Sets deletion protection on the instance. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +// - zone: The name of the zone for this request. func (r *InstancesService) SetDeletionProtection(project string, zone string, resource string) *InstancesSetDeletionProtectionCall { c := &InstancesSetDeletionProtectionCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -83261,7 +87317,7 @@ func (c *InstancesSetDeletionProtectionCall) Header() http.Header { func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83390,6 +87446,15 @@ type InstancesSetDiskAutoDeleteCall struct { // SetDiskAutoDelete: Sets the auto-delete flag for a disk attached to // an instance. +// +// - autoDelete: Whether to auto-delete the disk when the instance is +// deleted. +// - deviceName: The device name of the disk to modify. Make a get() +// request on the instance to view currently attached disks and device +// names. +// - instance: The instance name for this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setDiskAutoDelete func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instance string, autoDelete bool, deviceName string) *InstancesSetDiskAutoDeleteCall { c := &InstancesSetDiskAutoDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -83447,7 +87512,7 @@ func (c *InstancesSetDiskAutoDeleteCall) Header() http.Header { func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83586,6 +87651,10 @@ type InstancesSetIamPolicyCall struct { // SetIamPolicy: Sets the access control policy on the specified // resource. Replaces any existing policy. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +// - zone: The name of the zone for this request. 
func (r *InstancesService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *InstancesSetIamPolicyCall { c := &InstancesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -83622,7 +87691,7 @@ func (c *InstancesSetIamPolicyCall) Header() http.Header { func (c *InstancesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83749,6 +87818,10 @@ type InstancesSetLabelsCall struct { // SetLabels: Sets labels on an instance. To learn more about labels, // read the Labeling Resources documentation. +// +// - instance: Name of the instance scoping this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *InstancesService) SetLabels(project string, zone string, instance string, instancessetlabelsrequest *InstancesSetLabelsRequest) *InstancesSetLabelsCall { c := &InstancesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -83804,7 +87877,7 @@ func (c *InstancesSetLabelsCall) Header() http.Header { func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83936,6 +88009,10 @@ type InstancesSetMachineResourcesCall struct { // SetMachineResources: Changes the number and/or type of accelerator // for a stopped instance to the values specified in the request. +// +// - instance: Name of the instance scoping this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *InstancesService) SetMachineResources(project string, zone string, instance string, instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest) *InstancesSetMachineResourcesCall { c := &InstancesSetMachineResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -83991,7 +88068,7 @@ func (c *InstancesSetMachineResourcesCall) Header() http.Header { func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84123,6 +88200,10 @@ type InstancesSetMachineTypeCall struct { // SetMachineType: Changes the machine type for a stopped instance to // the machine type specified in the request. +// +// - instance: Name of the instance scoping this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
func (r *InstancesService) SetMachineType(project string, zone string, instance string, instancessetmachinetyperequest *InstancesSetMachineTypeRequest) *InstancesSetMachineTypeCall { c := &InstancesSetMachineTypeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -84178,7 +88259,7 @@ func (c *InstancesSetMachineTypeCall) Header() http.Header { func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84310,6 +88391,10 @@ type InstancesSetMetadataCall struct { // SetMetadata: Sets metadata for the specified instance to the data // included in the request. +// +// - instance: Name of the instance scoping this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setMetadata func (r *InstancesService) SetMetadata(project string, zone string, instance string, metadata *Metadata) *InstancesSetMetadataCall { c := &InstancesSetMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -84366,7 +88451,7 @@ func (c *InstancesSetMetadataCall) Header() http.Header { func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84500,6 +88585,10 @@ type InstancesSetMinCpuPlatformCall struct { // instance should use. This method can only be called on a stopped // instance. For more information, read Specifying a Minimum CPU // Platform. +// +// - instance: Name of the instance scoping this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *InstancesService) SetMinCpuPlatform(project string, zone string, instance string, instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest) *InstancesSetMinCpuPlatformCall { c := &InstancesSetMinCpuPlatformCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -84555,7 +88644,7 @@ func (c *InstancesSetMinCpuPlatformCall) Header() http.Header { func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84689,6 +88778,10 @@ type InstancesSetSchedulingCall struct { // call this method on a stopped instance, that is, a VM instance that // is in a `TERMINATED` state. See Instance Life Cycle for more // information on the possible instance states. +// +// - instance: Instance name for this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setScheduling func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall { c := &InstancesSetSchedulingCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -84745,7 +88838,7 @@ func (c *InstancesSetSchedulingCall) Header() http.Header { func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84878,6 +88971,10 @@ type InstancesSetServiceAccountCall struct { // SetServiceAccount: Sets the service account on the instance. For more // information, read Changing the service account and access scopes for // an instance. +// +// - instance: Name of the instance resource to start. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -84933,7 +89030,7 @@ func (c *InstancesSetServiceAccountCall) Header() http.Header { func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85067,6 +89164,10 @@ type InstancesSetShieldedInstanceIntegrityPolicyCall struct { // integrity policy for an instance. You can only use this method on a // running instance. This method supports PATCH semantics and uses the // JSON merge patch format and processing rules. +// +// - instance: Name or id of the instance scoping this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zone string, instance string, shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy) *InstancesSetShieldedInstanceIntegrityPolicyCall { c := &InstancesSetShieldedInstanceIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -85122,7 +89223,7 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Header() http.Header { func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85254,6 +89355,10 @@ type InstancesSetTagsCall struct { // SetTags: Sets network tags for the specified instance to the data // included in the request. +// +// - instance: Name of the instance scoping this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
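// Minimal sketch of compute.instances.setTags, assuming a *Service
// constructed elsewhere and placeholder names. Network tags are replaced as a
// whole, and the API expects the fingerprint of the currently stored tags, so
// the sketch reads the instance first.
func exampleInstancesSetTags(ctx context.Context, svc *Service) (*Operation, error) {
	const project, zone, instance = "example-project", "us-central1-a", "example-instance"
	inst, err := svc.Instances.Get(project, zone, instance).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	current := inst.Tags
	if current == nil {
		current = &Tags{}
	}
	tags := &Tags{
		Items:       append(current.Items, "allow-https"), // hypothetical tag
		Fingerprint: current.Fingerprint,                  // concurrency token from the Get above
	}
	return svc.Instances.SetTags(project, zone, instance, tags).Context(ctx).Do()
}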
// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setTags func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -85310,7 +89415,7 @@ func (c *InstancesSetTagsCall) Header() http.Header { func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85441,6 +89546,10 @@ type InstancesSimulateMaintenanceEventCall struct { // SimulateMaintenanceEvent: Simulates a maintenance event on the // instance. +// +// - instance: Name of the instance scoping this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, instance string) *InstancesSimulateMaintenanceEventCall { c := &InstancesSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -85476,7 +89585,7 @@ func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85594,6 +89703,10 @@ type InstancesStartCall struct { // Start: Starts an instance that was stopped using the instances().stop // method. For more information, see Restart an instance. +// +// - instance: Name of the instance resource to start. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/start func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -85649,7 +89762,7 @@ func (c *InstancesStartCall) Header() http.Header { func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85774,6 +89887,10 @@ type InstancesStartWithEncryptionKeyCall struct { // StartWithEncryptionKey: Starts an instance that was stopped using the // instances().stop method. For more information, see Restart an // instance. +// +// - instance: Name of the instance resource to start. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
func (r *InstancesService) StartWithEncryptionKey(project string, zone string, instance string, instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest) *InstancesStartWithEncryptionKeyCall { c := &InstancesStartWithEncryptionKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -85829,7 +89946,7 @@ func (c *InstancesStartWithEncryptionKeyCall) Header() http.Header { func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85964,6 +90081,10 @@ type InstancesStopCall struct { // that the VM is using, such as persistent disks and static IP // addresses, will continue to be charged until they are deleted. For // more information, see Stopping an instance. +// +// - instance: Name of the instance resource to stop. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/stop func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -86019,7 +90140,7 @@ func (c *InstancesStopCall) Header() http.Header { func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86143,6 +90264,10 @@ type InstancesTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +// - zone: The name of the zone for this request. func (r *InstancesService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstancesTestIamPermissionsCall { c := &InstancesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -86179,7 +90304,7 @@ func (c *InstancesTestIamPermissionsCall) Header() http.Header { func (c *InstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86309,6 +90434,10 @@ type InstancesUpdateCall struct { // available. This method can update only a specific set of instance // properties. See Updating a running instance for a list of updatable // instance properties. +// +// - instance: Name of the instance resource to update. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
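// Minimal sketch of the stop/start pair, assuming a *Service constructed
// elsewhere and that this package also exposes the generated
// zoneOperations.wait wrapper as ZoneOperations.Wait; names are placeholders.
func exampleInstancesStopStart(ctx context.Context, svc *Service) error {
	const project, zone, instance = "example-project", "us-central1-a", "example-instance"
	op, err := svc.Instances.Stop(project, zone, instance).Context(ctx).Do()
	if err != nil {
		return err
	}
	// Persistent disks and static IPs stay allocated (and billed) while the
	// instance is stopped, as the Stop doc comment above notes.
	for op.Status != "DONE" {
		if op, err = svc.ZoneOperations.Wait(project, zone, op.Name).Context(ctx).Do(); err != nil {
			return err
		}
	}
	_, err = svc.Instances.Start(project, zone, instance).Context(ctx).Do()
	return err
}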
func (r *InstancesService) Update(project string, zone string, instance string, instance2 *Instance) *InstancesUpdateCall { c := &InstancesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -86396,7 +90525,7 @@ func (c *InstancesUpdateCall) Header() http.Header { func (c *InstancesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86564,6 +90693,12 @@ type InstancesUpdateAccessConfigCall struct { // instance's network interface with the data included in the request. // This method supports PATCH semantics and uses the JSON merge patch // format and processing rules. +// +// - instance: The instance name for this request. +// - networkInterface: The name of the network interface where the +// access config is attached. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *InstancesService) UpdateAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesUpdateAccessConfigCall { c := &InstancesUpdateAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -86620,7 +90755,7 @@ func (c *InstancesUpdateAccessConfigCall) Header() http.Header { func (c *InstancesUpdateAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86761,6 +90896,10 @@ type InstancesUpdateDisplayDeviceCall struct { // You can only use this method on a stopped VM instance. This method // supports PATCH semantics and uses the JSON merge patch format and // processing rules. +// +// - instance: Name of the instance scoping this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *InstancesService) UpdateDisplayDevice(project string, zone string, instance string, displaydevice *DisplayDevice) *InstancesUpdateDisplayDeviceCall { c := &InstancesUpdateDisplayDeviceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -86816,7 +90955,7 @@ func (c *InstancesUpdateDisplayDeviceCall) Header() http.Header { func (c *InstancesUpdateDisplayDeviceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86952,6 +91091,11 @@ type InstancesUpdateNetworkInterfaceCall struct { // instructions on changing alias IP ranges. See Migrating a VM between // networks for instructions on migrating an interface. This method // follows PATCH semantics. +// +// - instance: The instance name for this request. +// - networkInterface: The name of the network interface to update. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
func (r *InstancesService) UpdateNetworkInterface(project string, zone string, instance string, networkInterface string, networkinterface *NetworkInterface) *InstancesUpdateNetworkInterfaceCall { c := &InstancesUpdateNetworkInterfaceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -87008,7 +91152,7 @@ func (c *InstancesUpdateNetworkInterfaceCall) Header() http.Header { func (c *InstancesUpdateNetworkInterfaceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87149,6 +91293,10 @@ type InstancesUpdateShieldedInstanceConfigCall struct { // for an instance. You can only use this method on a stopped instance. // This method supports PATCH semantics and uses the JSON merge patch // format and processing rules. +// +// - instance: Name or id of the instance scoping this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *InstancesService) UpdateShieldedInstanceConfig(project string, zone string, instance string, shieldedinstanceconfig *ShieldedInstanceConfig) *InstancesUpdateShieldedInstanceConfigCall { c := &InstancesUpdateShieldedInstanceConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -87204,7 +91352,7 @@ func (c *InstancesUpdateShieldedInstanceConfigCall) Header() http.Header { func (c *InstancesUpdateShieldedInstanceConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87334,6 +91482,8 @@ type InterconnectAttachmentsAggregatedListCall struct { // AggregatedList: Retrieves an aggregated list of interconnect // attachments. +// +// - project: Project ID for this request. func (r *InterconnectAttachmentsService) AggregatedList(project string) *InterconnectAttachmentsAggregatedListCall { c := &InterconnectAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -87421,7 +91571,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
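// Minimal sketch of the reworded ReturnPartialSuccess option together with
// page iteration, assuming a *Service constructed elsewhere; Instances.List
// stands in for the various list calls touched here, with placeholder project
// and zone names.
func exampleInstancesListPartial(ctx context.Context, svc *Service) error {
	call := svc.Instances.List("example-project", "us-central1-a").
		ReturnPartialSuccess(true). // opt in to partial results in case of failure
		Context(ctx)
	// Pages invokes the callback once per page until the listing is exhausted.
	return call.Pages(ctx, func(page *InstanceList) error {
		for _, inst := range page.Items {
			_ = inst.Name // placeholder for real per-instance processing
		}
		return nil
	})
}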
func (c *InterconnectAttachmentsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectAttachmentsAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -87464,7 +91614,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Header() http.Header { func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87570,7 +91720,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -87622,6 +91772,11 @@ type InterconnectAttachmentsDeleteCall struct { } // Delete: Deletes the specified interconnect attachment. +// +// - interconnectAttachment: Name of the interconnect attachment to +// delete. +// - project: Project ID for this request. +// - region: Name of the region for this request. func (r *InterconnectAttachmentsService) Delete(project string, region string, interconnectAttachment string) *InterconnectAttachmentsDeleteCall { c := &InterconnectAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -87676,7 +91831,7 @@ func (c *InterconnectAttachmentsDeleteCall) Header() http.Header { func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87799,6 +91954,11 @@ type InterconnectAttachmentsGetCall struct { } // Get: Returns the specified interconnect attachment. +// +// - interconnectAttachment: Name of the interconnect attachment to +// return. +// - project: Project ID for this request. +// - region: Name of the region for this request. func (r *InterconnectAttachmentsService) Get(project string, region string, interconnectAttachment string) *InterconnectAttachmentsGetCall { c := &InterconnectAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -87844,7 +92004,7 @@ func (c *InterconnectAttachmentsGetCall) Header() http.Header { func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87966,6 +92126,9 @@ type InterconnectAttachmentsInsertCall struct { // Insert: Creates an InterconnectAttachment in the specified project // using the data included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. 
func (r *InterconnectAttachmentsService) Insert(project string, region string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsInsertCall { c := &InterconnectAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -88027,7 +92190,7 @@ func (c *InterconnectAttachmentsInsertCall) Header() http.Header { func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88154,6 +92317,9 @@ type InterconnectAttachmentsListCall struct { // List: Retrieves the list of interconnect attachments contained within // the specified region. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. func (r *InterconnectAttachmentsService) List(project string, region string) *InterconnectAttachmentsListCall { c := &InterconnectAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -88229,7 +92395,7 @@ func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *Interconn // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *InterconnectAttachmentsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectAttachmentsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -88272,7 +92438,7 @@ func (c *InterconnectAttachmentsListCall) Header() http.Header { func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88381,7 +92547,7 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -88436,6 +92602,11 @@ type InterconnectAttachmentsPatchCall struct { // Patch: Updates the specified interconnect attachment with the data // included in the request. This method supports PATCH semantics and // uses the JSON merge patch format and processing rules. +// +// - interconnectAttachment: Name of the interconnect attachment to +// patch. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
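As a rough usage sketch (not part of this diff), patching an interconnect attachment with the vendored client could look like the following; the Patch call documented above uses merge-patch semantics, so only the fields set on the request body are changed. Resource names are placeholders and the InterconnectAttachment fields are assumed from the vendored package.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Disable the attachment; ForceSendFields is needed so the false value
	// is not dropped by the generated JSON marshaller.
	att := &compute.InterconnectAttachment{
		AdminEnabled:    false,
		ForceSendFields: []string{"AdminEnabled"},
	}

	op, err := svc.InterconnectAttachments.
		Patch("my-project", "us-east1", "my-attachment", att).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("patch operation:", op.Name)
}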
func (r *InterconnectAttachmentsService) Patch(project string, region string, interconnectAttachment string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsPatchCall { c := &InterconnectAttachmentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -88491,7 +92662,7 @@ func (c *InterconnectAttachmentsPatchCall) Header() http.Header { func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88623,6 +92794,9 @@ type InterconnectLocationsGetCall struct { // Get: Returns the details for the specified interconnect location. // Gets a list of available interconnect locations by making a list() // request. +// +// - interconnectLocation: Name of the interconnect location to return. +// - project: Project ID for this request. func (r *InterconnectLocationsService) Get(project string, interconnectLocation string) *InterconnectLocationsGetCall { c := &InterconnectLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -88667,7 +92841,7 @@ func (c *InterconnectLocationsGetCall) Header() http.Header { func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88779,6 +92953,8 @@ type InterconnectLocationsListCall struct { // List: Retrieves the list of interconnect locations available to the // specified project. +// +// - project: Project ID for this request. func (r *InterconnectLocationsService) List(project string) *InterconnectLocationsListCall { c := &InterconnectLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -88853,7 +93029,7 @@ func (c *InterconnectLocationsListCall) PageToken(pageToken string) *Interconnec // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *InterconnectLocationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectLocationsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -88896,7 +93072,7 @@ func (c *InterconnectLocationsListCall) Header() http.Header { func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88996,7 +93172,7 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -89047,6 +93223,9 @@ type InterconnectsDeleteCall struct { } // Delete: Deletes the specified interconnect. +// +// - interconnect: Name of the interconnect to delete. +// - project: Project ID for this request. func (r *InterconnectsService) Delete(project string, interconnect string) *InterconnectsDeleteCall { c := &InterconnectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -89100,7 +93279,7 @@ func (c *InterconnectsDeleteCall) Header() http.Header { func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89214,6 +93393,9 @@ type InterconnectsGetCall struct { // Get: Returns the specified interconnect. Get a list of available // interconnects by making a list() request. +// +// - interconnect: Name of the interconnect to return. +// - project: Project ID for this request. func (r *InterconnectsService) Get(project string, interconnect string) *InterconnectsGetCall { c := &InterconnectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -89258,7 +93440,7 @@ func (c *InterconnectsGetCall) Header() http.Header { func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89371,6 +93553,9 @@ type InterconnectsGetDiagnosticsCall struct { // GetDiagnostics: Returns the interconnectDiagnostics for the specified // interconnect. +// +// - interconnect: Name of the interconnect resource to query. +// - project: Project ID for this request. func (r *InterconnectsService) GetDiagnostics(project string, interconnect string) *InterconnectsGetDiagnosticsCall { c := &InterconnectsGetDiagnosticsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -89415,7 +93600,7 @@ func (c *InterconnectsGetDiagnosticsCall) Header() http.Header { func (c *InterconnectsGetDiagnosticsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89528,6 +93713,8 @@ type InterconnectsInsertCall struct { // Insert: Creates a Interconnect in the specified project using the // data included in the request. +// +// - project: Project ID for this request. 
func (r *InterconnectsService) Insert(project string, interconnect *Interconnect) *InterconnectsInsertCall { c := &InterconnectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -89581,7 +93768,7 @@ func (c *InterconnectsInsertCall) Header() http.Header { func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89693,6 +93880,8 @@ type InterconnectsListCall struct { // List: Retrieves the list of interconnect available to the specified // project. +// +// - project: Project ID for this request. func (r *InterconnectsService) List(project string) *InterconnectsListCall { c := &InterconnectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -89767,7 +93956,7 @@ func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCa // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *InterconnectsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -89810,7 +93999,7 @@ func (c *InterconnectsListCall) Header() http.Header { func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89910,7 +94099,7 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -89964,6 +94153,9 @@ type InterconnectsPatchCall struct { // Patch: Updates the specified interconnect with the data included in // the request. This method supports PATCH semantics and uses the JSON // merge patch format and processing rules. +// +// - interconnect: Name of the interconnect to update. +// - project: Project ID for this request. 
func (r *InterconnectsService) Patch(project string, interconnect string, interconnect2 *Interconnect) *InterconnectsPatchCall { c := &InterconnectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -90018,7 +94210,7 @@ func (c *InterconnectsPatchCall) Header() http.Header { func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90142,6 +94334,10 @@ type LicenseCodesGetCall struct { // across all projects that have permissions to read the License Code. // Caution This resource is intended for use only by third-party // partners who are creating Cloud Marketplace images. +// +// - licenseCode: Number corresponding to the License code resource to +// return. +// - project: Project ID for this request. func (r *LicenseCodesService) Get(project string, licenseCode string) *LicenseCodesGetCall { c := &LicenseCodesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -90186,7 +94382,7 @@ func (c *LicenseCodesGetCall) Header() http.Header { func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90300,6 +94496,9 @@ type LicenseCodesTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. Caution This resource is intended for use only // by third-party partners who are creating Cloud Marketplace images. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. func (r *LicenseCodesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicenseCodesTestIamPermissionsCall { c := &LicenseCodesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -90335,7 +94534,7 @@ func (c *LicenseCodesTestIamPermissionsCall) Header() http.Header { func (c *LicenseCodesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90453,6 +94652,9 @@ type LicensesDeleteCall struct { // Delete: Deletes the specified license. Caution This resource is // intended for use only by third-party partners who are creating Cloud // Marketplace images. +// +// - license: Name of the license resource to delete. +// - project: Project ID for this request. 
func (r *LicensesService) Delete(project string, license string) *LicensesDeleteCall { c := &LicensesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -90506,7 +94708,7 @@ func (c *LicensesDeleteCall) Header() http.Header { func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90621,6 +94823,9 @@ type LicensesGetCall struct { // Get: Returns the specified License resource. Caution This resource // is intended for use only by third-party partners who are creating // Cloud Marketplace images. +// +// - license: Name of the License resource to return. +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/licenses/get func (r *LicensesService) Get(project string, license string) *LicensesGetCall { c := &LicensesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -90666,7 +94871,7 @@ func (c *LicensesGetCall) Header() http.Header { func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90781,6 +94986,9 @@ type LicensesGetIamPolicyCall struct { // empty if no such policy or resource exists. Caution This resource is // intended for use only by third-party partners who are creating Cloud // Marketplace images. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. func (r *LicensesService) GetIamPolicy(project string, resource string) *LicensesGetIamPolicyCall { c := &LicensesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -90832,7 +95040,7 @@ func (c *LicensesGetIamPolicyCall) Header() http.Header { func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90951,6 +95159,8 @@ type LicensesInsertCall struct { // Insert: Create a License resource in the specified project. Caution // This resource is intended for use only by third-party partners who // are creating Cloud Marketplace images. +// +// - project: Project ID for this request. func (r *LicensesService) Insert(project string, license *License) *LicensesInsertCall { c := &LicensesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -91004,7 +95214,7 @@ func (c *LicensesInsertCall) Header() http.Header { func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91125,6 +95335,8 @@ type LicensesListCall struct { // project, such as debian-cloud or windows-cloud. 
Caution This // resource is intended for use only by third-party partners who are // creating Cloud Marketplace images. +// +// - project: Project ID for this request. func (r *LicensesService) List(project string) *LicensesListCall { c := &LicensesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -91199,7 +95411,7 @@ func (c *LicensesListCall) PageToken(pageToken string) *LicensesListCall { // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *LicensesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *LicensesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -91242,7 +95454,7 @@ func (c *LicensesListCall) Header() http.Header { func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91342,7 +95554,7 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -91397,6 +95609,9 @@ type LicensesSetIamPolicyCall struct { // resource. Replaces any existing policy. Caution This resource is // intended for use only by third-party partners who are creating Cloud // Marketplace images. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. func (r *LicensesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *LicensesSetIamPolicyCall { c := &LicensesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -91432,7 +95647,7 @@ func (c *LicensesSetIamPolicyCall) Header() http.Header { func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91550,6 +95765,9 @@ type LicensesTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. Caution This resource is intended for use only // by third-party partners who are creating Cloud Marketplace images. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. 
func (r *LicensesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicensesTestIamPermissionsCall { c := &LicensesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -91585,7 +95803,7 @@ func (c *LicensesTestIamPermissionsCall) Header() http.Header { func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91701,6 +95919,8 @@ type MachineTypesAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of machine types. +// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/aggregatedList func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall { c := &MachineTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -91789,7 +96009,7 @@ func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTyp // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *MachineTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineTypesAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -91832,7 +96052,7 @@ func (c *MachineTypesAggregatedListCall) Header() http.Header { func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91937,7 +96157,7 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -91991,6 +96211,10 @@ type MachineTypesGetCall struct { // Get: Returns the specified machine type. Gets a list of available // machine types by making a list() request. +// +// - machineType: Name of the machine type to return. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
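The description change that recurs throughout this file ("The default value is false.") concerns the returnPartialSuccess query parameter on list and aggregated-list calls. A hedged sketch of opting into it on MachineTypes.AggregatedList via the vendored client, using the generated Pages helper; the project ID is a placeholder.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// ReturnPartialSuccess(true) asks for results from the zones that
	// responded even if some zones fail.
	call := svc.MachineTypes.AggregatedList("my-project").ReturnPartialSuccess(true)

	err = call.Pages(ctx, func(page *compute.MachineTypeAggregatedList) error {
		for zone, scoped := range page.Items {
			for _, mt := range scoped.MachineTypes {
				fmt.Println(zone, mt.Name, mt.GuestCpus, mt.MemoryMb)
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}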
// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/get func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall { c := &MachineTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -92037,7 +96261,7 @@ func (c *MachineTypesGetCall) Header() http.Header { func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92159,6 +96383,9 @@ type MachineTypesListCall struct { // List: Retrieves a list of machine types available to the specified // project. +// +// - project: Project ID for this request. +// - zone: The name of the zone for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/list func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall { c := &MachineTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -92235,7 +96462,7 @@ func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *MachineTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineTypesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -92278,7 +96505,7 @@ func (c *MachineTypesListCall) Header() http.Header { func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92380,7 +96607,7 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // }, @@ -92439,6 +96666,8 @@ type NetworkEndpointGroupsAggregatedListCall struct { // AggregatedList: Retrieves the list of network endpoint groups and // sorts them by zone. +// +// - project: Project ID for this request. func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEndpointGroupsAggregatedListCall { c := &NetworkEndpointGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -92526,7 +96755,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) PageToken(pageToken string) *N // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *NetworkEndpointGroupsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -92569,7 +96798,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Header() http.Header { func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92675,7 +96904,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -92729,6 +96958,13 @@ type NetworkEndpointGroupsAttachNetworkEndpointsCall struct { // AttachNetworkEndpoints: Attach a list of network endpoints to the // specified network endpoint group. +// +// - networkEndpointGroup: The name of the network endpoint group where +// you are attaching network endpoints to. It should comply with +// RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the network endpoint group is +// located. It should comply with RFC1035. func (r *NetworkEndpointGroupsService) AttachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest) *NetworkEndpointGroupsAttachNetworkEndpointsCall { c := &NetworkEndpointGroupsAttachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -92784,7 +97020,7 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.Header { func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92915,6 +97151,12 @@ type NetworkEndpointGroupsDeleteCall struct { // endpoints in the NEG and the VM instances they belong to are not // terminated when the NEG is deleted. Note that the NEG cannot be // deleted if there are backend services referencing it. +// +// - networkEndpointGroup: The name of the network endpoint group to +// delete. It should comply with RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the network endpoint group is +// located. It should comply with RFC1035. 
func (r *NetworkEndpointGroupsService) Delete(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsDeleteCall { c := &NetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -92969,7 +97211,7 @@ func (c *NetworkEndpointGroupsDeleteCall) Header() http.Header { func (c *NetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93091,6 +97333,12 @@ type NetworkEndpointGroupsDetachNetworkEndpointsCall struct { // DetachNetworkEndpoints: Detach a list of network endpoints from the // specified network endpoint group. +// +// - networkEndpointGroup: The name of the network endpoint group where +// you are removing network endpoints. It should comply with RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the network endpoint group is +// located. It should comply with RFC1035. func (r *NetworkEndpointGroupsService) DetachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest) *NetworkEndpointGroupsDetachNetworkEndpointsCall { c := &NetworkEndpointGroupsDetachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -93146,7 +97394,7 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.Header { func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93276,6 +97524,12 @@ type NetworkEndpointGroupsGetCall struct { // Get: Returns the specified network endpoint group. Gets a list of // available network endpoint groups by making a list() request. +// +// - networkEndpointGroup: The name of the network endpoint group. It +// should comply with RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the network endpoint group is +// located. It should comply with RFC1035. func (r *NetworkEndpointGroupsService) Get(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsGetCall { c := &NetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -93321,7 +97575,7 @@ func (c *NetworkEndpointGroupsGetCall) Header() http.Header { func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93441,6 +97695,10 @@ type NetworkEndpointGroupsInsertCall struct { // Insert: Creates a network endpoint group in the specified project // using the parameters that are included in the request. +// +// - project: Project ID for this request. +// - zone: The name of the zone where you want to create the network +// endpoint group. It should comply with RFC1035. 
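For context, a minimal sketch (assumed names, not from this diff) of attaching an endpoint to a zonal network endpoint group through the vendored client, matching the AttachNetworkEndpoints signature documented above:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// One GCE_VM_IP_PORT endpoint; instance, IP and port are placeholders.
	req := &compute.NetworkEndpointGroupsAttachEndpointsRequest{
		NetworkEndpoints: []*compute.NetworkEndpoint{
			{Instance: "my-vm", IpAddress: "10.0.0.5", Port: 8080},
		},
	}

	op, err := svc.NetworkEndpointGroups.
		AttachNetworkEndpoints("my-project", "us-east1-b", "my-neg", req).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("attach operation:", op.Name)
}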
func (r *NetworkEndpointGroupsService) Insert(project string, zone string, networkendpointgroup *NetworkEndpointGroup) *NetworkEndpointGroupsInsertCall { c := &NetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -93495,7 +97753,7 @@ func (c *NetworkEndpointGroupsInsertCall) Header() http.Header { func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93616,6 +97874,10 @@ type NetworkEndpointGroupsListCall struct { // List: Retrieves the list of network endpoint groups that are located // in the specified project and zone. +// +// - project: Project ID for this request. +// - zone: The name of the zone where the network endpoint group is +// located. It should comply with RFC1035. func (r *NetworkEndpointGroupsService) List(project string, zone string) *NetworkEndpointGroupsListCall { c := &NetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -93691,7 +97953,7 @@ func (c *NetworkEndpointGroupsListCall) PageToken(pageToken string) *NetworkEndp // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *NetworkEndpointGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -93734,7 +97996,7 @@ func (c *NetworkEndpointGroupsListCall) Header() http.Header { func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93836,7 +98098,7 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // }, @@ -93896,6 +98158,13 @@ type NetworkEndpointGroupsListNetworkEndpointsCall struct { // ListNetworkEndpoints: Lists the network endpoints in the specified // network endpoint group. +// +// - networkEndpointGroup: The name of the network endpoint group from +// which you want to generate a list of included network endpoints. It +// should comply with RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the network endpoint group is +// located. It should comply with RFC1035. 
func (r *NetworkEndpointGroupsService) ListNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest) *NetworkEndpointGroupsListNetworkEndpointsCall { c := &NetworkEndpointGroupsListNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -93973,7 +98242,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToken stri // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *NetworkEndpointGroupsListNetworkEndpointsCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsListNetworkEndpointsCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -94006,7 +98275,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Header { func (c *NetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94120,7 +98389,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // }, @@ -94183,6 +98452,10 @@ type NetworkEndpointGroupsTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +// - zone: The name of the zone for this request. func (r *NetworkEndpointGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkEndpointGroupsTestIamPermissionsCall { c := &NetworkEndpointGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -94219,7 +98492,7 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Header() http.Header { func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94345,6 +98618,9 @@ type NetworksAddPeeringCall struct { } // AddPeering: Adds a peering to the specified network. +// +// - network: Name of the network resource to add peering to. +// - project: Project ID for this request. 
func (r *NetworksService) AddPeering(project string, network string, networksaddpeeringrequest *NetworksAddPeeringRequest) *NetworksAddPeeringCall { c := &NetworksAddPeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -94399,7 +98675,7 @@ func (c *NetworksAddPeeringCall) Header() http.Header { func (c *NetworksAddPeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94519,6 +98795,9 @@ type NetworksDeleteCall struct { } // Delete: Deletes the specified network. +// +// - network: Name of the network to delete. +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/networks/delete func (r *NetworksService) Delete(project string, network string) *NetworksDeleteCall { c := &NetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -94573,7 +98852,7 @@ func (c *NetworksDeleteCall) Header() http.Header { func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94687,6 +98966,9 @@ type NetworksGetCall struct { // Get: Returns the specified network. Gets a list of available networks // by making a list() request. +// +// - network: Name of the network to return. +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/networks/get func (r *NetworksService) Get(project string, network string) *NetworksGetCall { c := &NetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -94732,7 +99014,7 @@ func (c *NetworksGetCall) Header() http.Header { func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94831,107 +99113,101 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { } -// method id "compute.networks.insert": +// method id "compute.networks.getEffectiveFirewalls": -type NetworksInsertCall struct { - s *Service - project string - network *Network - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksGetEffectiveFirewallsCall struct { + s *Service + project string + network string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates a network in the specified project using the data -// included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/insert -func (r *NetworksService) Insert(project string, network *Network) *NetworksInsertCall { - c := &NetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetEffectiveFirewalls: Returns the effective firewalls on a given +// network. +// +// - network: Name of the network for this request. +// - project: Project ID for this request. 
+func (r *NetworksService) GetEffectiveFirewalls(project string, network string) *NetworksGetEffectiveFirewallsCall { + c := &NetworksGetEffectiveFirewallsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.network = network return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworksInsertCall) RequestId(requestId string) *NetworksInsertCall { - c.urlParams_.Set("requestId", requestId) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksInsertCall) Fields(s ...googleapi.Field) *NetworksInsertCall { +func (c *NetworksGetEffectiveFirewallsCall) Fields(s ...googleapi.Field) *NetworksGetEffectiveFirewallsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworksGetEffectiveFirewallsCall) IfNoneMatch(entityTag string) *NetworksGetEffectiveFirewallsCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksInsertCall) Context(ctx context.Context) *NetworksInsertCall { +func (c *NetworksGetEffectiveFirewallsCall) Context(ctx context.Context) *NetworksGetEffectiveFirewallsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NetworksInsertCall) Header() http.Header { +func (c *NetworksGetEffectiveFirewallsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksGetEffectiveFirewallsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.network) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networks") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networks/{network}/getEffectiveFirewalls") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.networks.getEffectiveFirewalls" call. +// Exactly one of *NetworksGetEffectiveFirewallsResponse or error will +// be non-nil. Any non-2xx status code is an error. Response headers are +// in either +// *NetworksGetEffectiveFirewallsResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworksGetEffectiveFirewallsCall) Do(opts ...googleapi.CallOption) (*NetworksGetEffectiveFirewallsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -94950,7 +99226,7 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &NetworksGetEffectiveFirewallsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -94962,186 +99238,127 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Creates a network in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.networks.insert", + // "description": "Returns the effective firewalls on a given network.", + // "httpMethod": "GET", + // "id": "compute.networks.getEffectiveFirewalls", // "parameterOrder": [ - // "project" + // "project", + // "network" // ], // "parameters": { + // "network": { + // "description": "Name of the network for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "projects/{project}/global/networks", - // "request": { - // "$ref": "Network" - // }, + // "path": "projects/{project}/global/networks/{network}/getEffectiveFirewalls", // "response": { - // "$ref": "Operation" + // "$ref": "NetworksGetEffectiveFirewallsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.networks.list": - -type NetworksListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} +// method id "compute.networks.insert": -// List: Retrieves the list of networks available to the specified -// project. 
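compute.networks.getEffectiveFirewalls is new in this regeneration. A speculative usage sketch against the vendored client, assuming the Firewalls field on the response type defined elsewhere in the generated package; project and network names are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	resp, err := svc.Networks.
		GetEffectiveFirewalls("my-project", "my-vpc").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}

	// Firewalls is assumed to hold the VPC firewall rules that effectively
	// apply to the network.
	for _, fw := range resp.Firewalls {
		fmt.Println(fw.Name, fw.Direction)
	}
}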
-// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/list -func (r *NetworksService) List(project string) *NetworksListCall { - c := &NetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c +type NetworksInsertCall struct { + s *Service + project string + network *Network + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either `=`, `!=`, `>`, or -// `<`. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. -// -// You can also filter nested fields. For example, you could specify -// `scheduling.automaticRestart = false` to include instances only if -// they are not scheduled for automatic restarts. You can use filtering -// on nested fields to filter based on resource labels. +// Insert: Creates a network in the specified project using the data +// included in the request. // -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example: ``` (scheduling.automaticRestart = -// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression -// is an `AND` expression. However, you can include `AND` and `OR` -// expressions explicitly. For example: ``` (cpuPlatform = "Intel -// Skylake") OR (cpuPlatform = "Intel Broadwell") AND -// (scheduling.automaticRestart = true) ``` -func (c *NetworksListCall) Filter(filter string) *NetworksListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *NetworksListCall) MaxResults(maxResults int64) *NetworksListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// - project: Project ID for this request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/insert +func (r *NetworksService) Insert(project string, network *Network) *NetworksInsertCall { + c := &NetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.network = network return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using `orderBy="creationTimestamp desc". This sorts -// results based on the `creationTimestamp` field in reverse -// chronological order (newest result first). 
Use this to sort resources -// like operations so that the newest operation is returned -// first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by `name` or `creationTimestamp desc` is -// supported. -func (c *NetworksListCall) OrderBy(orderBy string) *NetworksListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false and the logic is the same as today. -func (c *NetworksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworksListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworksInsertCall) RequestId(requestId string) *NetworksInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksListCall) Fields(s ...googleapi.Field) *NetworksListCall { +func (c *NetworksInsertCall) Fields(s ...googleapi.Field) *NetworksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NetworksListCall) IfNoneMatch(entityTag string) *NetworksListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksListCall) Context(ctx context.Context) *NetworksListCall { +func (c *NetworksInsertCall) Context(ctx context.Context) *NetworksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NetworksListCall) Header() http.Header { +func (c *NetworksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.network) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networks") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -95152,14 +99369,14 @@ func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.list" call. -// Exactly one of *NetworkList or error will be non-nil. Any non-2xx +// Do executes the "compute.networks.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *NetworkList.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error) { +func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -95178,7 +99395,7 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NetworkList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -95190,36 +99407,13 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error } return ret, nil // { - // "description": "Retrieves the list of networks available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.networks.list", + // "description": "Creates a network in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.networks.insert", // "parameterOrder": [ // "project" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -95227,75 +99421,331 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", - // "type": "boolean" + // "type": "string" // } // }, // "path": "projects/{project}/global/networks", + // "request": { + // "$ref": "Network" + // }, // "response": { - // "$ref": "NetworkList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *NetworksListCall) Pages(ctx context.Context, f func(*NetworkList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.networks.listPeeringRoutes": +// method id "compute.networks.list": -type NetworksListPeeringRoutesCall struct { +type NetworksListCall struct { s *Service project string - network string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// ListPeeringRoutes: Lists the peering routes exchanged over peering -// connection. -func (r *NetworksService) ListPeeringRoutes(project string, network string) *NetworksListPeeringRoutesCall { - c := &NetworksListPeeringRoutesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.network = network - return c -} - -// Direction sets the optional parameter "direction": The direction of -// the exchanged routes. +// List: Retrieves the list of networks available to the specified +// project. // -// Possible values: -// "INCOMING" -// "OUTGOING" -func (c *NetworksListPeeringRoutesCall) Direction(direction string) *NetworksListPeeringRoutesCall { - c.urlParams_.Set("direction", direction) +// - project: Project ID for this request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/list +func (r *NetworksService) List(project string) *NetworksListCall { + c := &NetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. 
For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *NetworksListCall) Filter(filter string) *NetworksListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *NetworksListCall) MaxResults(maxResults int64) *NetworksListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *NetworksListCall) OrderBy(orderBy string) *NetworksListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *NetworksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworksListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworksListCall) Fields(s ...googleapi.Field) *NetworksListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworksListCall) IfNoneMatch(entityTag string) *NetworksListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
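// Illustrative usage sketches (not part of the vendored upstream file, and
// written from a client package's point of view): listing networks with the
// Pages helper that appears a little further down in this file, and fetching
// effective firewalls with the call generated above. They assume the usual
// imports ("context", "fmt", compute "google.golang.org/api/compute/v1"), an
// authenticated *compute.Service, and placeholder project/network names; the
// Firewalls field on the response is assumed from the v1 API surface.
func exampleListNetworks(ctx context.Context, svc *compute.Service) error {
	return svc.Networks.List("my-project").
		Filter(`name != "example-network"`).
		MaxResults(100).
		Pages(ctx, func(page *compute.NetworkList) error {
			for _, n := range page.Items {
				fmt.Println(n.Name)
			}
			return nil
		})
}

func exampleEffectiveFirewalls(ctx context.Context, svc *compute.Service) error {
	resp, err := svc.Networks.GetEffectiveFirewalls("my-project", "my-network").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// Firewalls is assumed to hold the effective firewall rules for the network.
	for _, fw := range resp.Firewalls {
		fmt.Println(fw.Name)
	}
	return nil
}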
+func (c *NetworksListCall) Context(ctx context.Context) *NetworksListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworksListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networks") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networks.list" call. +// Exactly one of *NetworkList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *NetworkList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &NetworkList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of networks available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.networks.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/global/networks", + // "response": { + // "$ref": "NetworkList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NetworksListCall) Pages(ctx context.Context, f func(*NetworkList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.networks.listPeeringRoutes": + +type NetworksListPeeringRoutesCall struct { + s *Service + project string + network string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// ListPeeringRoutes: Lists the peering routes exchanged over peering +// connection. +// +// - network: Name of the network for this request. 
+// - project: Project ID for this request. +func (r *NetworksService) ListPeeringRoutes(project string, network string) *NetworksListPeeringRoutesCall { + c := &NetworksListPeeringRoutesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.network = network + return c +} + +// Direction sets the optional parameter "direction": The direction of +// the exchanged routes. +// +// Possible values: +// "INCOMING" +// "OUTGOING" +func (c *NetworksListPeeringRoutesCall) Direction(direction string) *NetworksListPeeringRoutesCall { + c.urlParams_.Set("direction", direction) return c } @@ -95382,7 +99832,7 @@ func (c *NetworksListPeeringRoutesCall) Region(region string) *NetworksListPeeri // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *NetworksListPeeringRoutesCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworksListPeeringRoutesCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -95425,7 +99875,7 @@ func (c *NetworksListPeeringRoutesCall) Header() http.Header { func (c *NetworksListPeeringRoutesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -95557,7 +100007,7 @@ func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*Excha // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -95611,6 +100061,9 @@ type NetworksPatchCall struct { // Patch: Patches the specified network with the data included in the // request. Only the following fields can be modified: // routingConfig.routingMode. +// +// - network: Name of the network to update. +// - project: Project ID for this request. func (r *NetworksService) Patch(project string, network string, network2 *Network) *NetworksPatchCall { c := &NetworksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -95665,7 +100118,7 @@ func (c *NetworksPatchCall) Header() http.Header { func (c *NetworksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -95786,6 +100239,9 @@ type NetworksRemovePeeringCall struct { } // RemovePeering: Removes a peering from the specified network. +// +// - network: Name of the network resource to remove peering from. +// - project: Project ID for this request. 
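// Illustrative usage sketch (not part of the vendored upstream file): listing
// incoming peering routes with the ListPeeringRoutes call shown above. Names
// are placeholders; the *compute.ExchangedPeeringRoutesList return type is
// taken from the Do signature in this file.
func exampleIncomingPeeringRoutes(ctx context.Context, svc *compute.Service) (*compute.ExchangedPeeringRoutesList, error) {
	return svc.Networks.ListPeeringRoutes("my-project", "my-network").
		Direction("INCOMING").
		Context(ctx).
		Do()
}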
func (r *NetworksService) RemovePeering(project string, network string, networksremovepeeringrequest *NetworksRemovePeeringRequest) *NetworksRemovePeeringCall { c := &NetworksRemovePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -95840,7 +100296,7 @@ func (c *NetworksRemovePeeringCall) Header() http.Header { func (c *NetworksRemovePeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -95961,6 +100417,9 @@ type NetworksSwitchToCustomModeCall struct { // SwitchToCustomMode: Switches the network mode from auto subnet mode // to custom subnet mode. +// +// - network: Name of the network to be updated. +// - project: Project ID for this request. func (r *NetworksService) SwitchToCustomMode(project string, network string) *NetworksSwitchToCustomModeCall { c := &NetworksSwitchToCustomModeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -96014,7 +100473,7 @@ func (c *NetworksSwitchToCustomModeCall) Header() http.Header { func (c *NetworksSwitchToCustomModeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96130,6 +100589,10 @@ type NetworksUpdatePeeringCall struct { // included in the request Only the following fields can be modified: // NetworkPeering.export_custom_routes, and // NetworkPeering.import_custom_routes +// +// - network: Name of the network resource which the updated peering is +// belonging to. +// - project: Project ID for this request. func (r *NetworksService) UpdatePeering(project string, network string, networksupdatepeeringrequest *NetworksUpdatePeeringRequest) *NetworksUpdatePeeringCall { c := &NetworksUpdatePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -96184,7 +100647,7 @@ func (c *NetworksUpdatePeeringCall) Header() http.Header { func (c *NetworksUpdatePeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96306,6 +100769,10 @@ type NodeGroupsAddNodesCall struct { } // AddNodes: Adds specified number of nodes to the node group. +// +// - nodeGroup: Name of the NodeGroup resource. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
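// Illustrative usage sketch (not part of the vendored upstream file): growing
// a node group with the AddNodes call documented above. Project, zone and
// group names are placeholders; AdditionalNodeCount is the field on
// NodeGroupsAddNodesRequest that carries the number of nodes to add.
func exampleAddNodes(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	req := &compute.NodeGroupsAddNodesRequest{AdditionalNodeCount: 2}
	return svc.NodeGroups.AddNodes("my-project", "us-central1-a", "my-node-group", req).
		Context(ctx).
		Do()
}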
func (r *NodeGroupsService) AddNodes(project string, zone string, nodeGroup string, nodegroupsaddnodesrequest *NodeGroupsAddNodesRequest) *NodeGroupsAddNodesCall { c := &NodeGroupsAddNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -96361,7 +100828,7 @@ func (c *NodeGroupsAddNodesCall) Header() http.Header { func (c *NodeGroupsAddNodesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96491,6 +100958,8 @@ type NodeGroupsAggregatedListCall struct { // AggregatedList: Retrieves an aggregated list of node groups. Note: // use nodeGroups.listNodes for more details about each group. +// +// - project: Project ID for this request. func (r *NodeGroupsService) AggregatedList(project string) *NodeGroupsAggregatedListCall { c := &NodeGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -96578,7 +101047,7 @@ func (c *NodeGroupsAggregatedListCall) PageToken(pageToken string) *NodeGroupsAg // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *NodeGroupsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeGroupsAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -96621,7 +101090,7 @@ func (c *NodeGroupsAggregatedListCall) Header() http.Header { func (c *NodeGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96726,7 +101195,7 @@ func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGr // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -96778,6 +101247,10 @@ type NodeGroupsDeleteCall struct { } // Delete: Deletes the specified NodeGroup resource. +// +// - nodeGroup: Name of the NodeGroup resource to delete. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
func (r *NodeGroupsService) Delete(project string, zone string, nodeGroup string) *NodeGroupsDeleteCall { c := &NodeGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -96832,7 +101305,7 @@ func (c *NodeGroupsDeleteCall) Header() http.Header { func (c *NodeGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96955,6 +101428,11 @@ type NodeGroupsDeleteNodesCall struct { } // DeleteNodes: Deletes specified nodes from the node group. +// +// - nodeGroup: Name of the NodeGroup resource whose nodes will be +// deleted. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *NodeGroupsService) DeleteNodes(project string, zone string, nodeGroup string, nodegroupsdeletenodesrequest *NodeGroupsDeleteNodesRequest) *NodeGroupsDeleteNodesCall { c := &NodeGroupsDeleteNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -97010,7 +101488,7 @@ func (c *NodeGroupsDeleteNodesCall) Header() http.Header { func (c *NodeGroupsDeleteNodesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97143,6 +101621,10 @@ type NodeGroupsGetCall struct { // Get: Returns the specified NodeGroup. Get a list of available // NodeGroups by making a list() request. Note: the "nodes" field should // not be used. Use nodeGroups.listNodes instead. +// +// - nodeGroup: Name of the node group to return. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *NodeGroupsService) Get(project string, zone string, nodeGroup string) *NodeGroupsGetCall { c := &NodeGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -97188,7 +101670,7 @@ func (c *NodeGroupsGetCall) Header() http.Header { func (c *NodeGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97311,6 +101793,10 @@ type NodeGroupsGetIamPolicyCall struct { // GetIamPolicy: Gets the access control policy for a resource. May be // empty if no such policy or resource exists. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +// - zone: The name of the zone for this request. 
func (r *NodeGroupsService) GetIamPolicy(project string, zone string, resource string) *NodeGroupsGetIamPolicyCall { c := &NodeGroupsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -97363,7 +101849,7 @@ func (c *NodeGroupsGetIamPolicyCall) Header() http.Header { func (c *NodeGroupsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97491,6 +101977,10 @@ type NodeGroupsInsertCall struct { // Insert: Creates a NodeGroup resource in the specified project using // the data included in the request. +// +// - initialNodeCount: Initial count of nodes in the node group. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *NodeGroupsService) Insert(project string, zone string, initialNodeCount int64, nodegroup *NodeGroup) *NodeGroupsInsertCall { c := &NodeGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -97546,7 +102036,7 @@ func (c *NodeGroupsInsertCall) Header() http.Header { func (c *NodeGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97677,6 +102167,9 @@ type NodeGroupsListCall struct { // List: Retrieves a list of node groups available to the specified // project. Note: use nodeGroups.listNodes for more details about each // group. +// +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *NodeGroupsService) List(project string, zone string) *NodeGroupsListCall { c := &NodeGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -97752,7 +102245,7 @@ func (c *NodeGroupsListCall) PageToken(pageToken string) *NodeGroupsListCall { // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *NodeGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeGroupsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -97795,7 +102288,7 @@ func (c *NodeGroupsListCall) Header() http.Header { func (c *NodeGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97897,7 +102390,7 @@ func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, e // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" // }, @@ -97956,6 +102449,11 @@ type NodeGroupsListNodesCall struct { } // ListNodes: Lists nodes in the node group. +// +// - nodeGroup: Name of the NodeGroup resource whose nodes you want to +// list. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *NodeGroupsService) ListNodes(project string, zone string, nodeGroup string) *NodeGroupsListNodesCall { c := &NodeGroupsListNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -98032,7 +102530,7 @@ func (c *NodeGroupsListNodesCall) PageToken(pageToken string) *NodeGroupsListNod // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *NodeGroupsListNodesCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeGroupsListNodesCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -98065,7 +102563,7 @@ func (c *NodeGroupsListNodesCall) Header() http.Header { func (c *NodeGroupsListNodesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98173,7 +102671,7 @@ func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsL // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // }, @@ -98233,6 +102731,10 @@ type NodeGroupsPatchCall struct { } // Patch: Updates the specified node group. +// +// - nodeGroup: Name of the NodeGroup resource to update. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *NodeGroupsService) Patch(project string, zone string, nodeGroup string, nodegroup *NodeGroup) *NodeGroupsPatchCall { c := &NodeGroupsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -98288,7 +102790,7 @@ func (c *NodeGroupsPatchCall) Header() http.Header { func (c *NodeGroupsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98420,6 +102922,10 @@ type NodeGroupsSetIamPolicyCall struct { // SetIamPolicy: Sets the access control policy on the specified // resource. Replaces any existing policy. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +// - zone: The name of the zone for this request. 
func (r *NodeGroupsService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *NodeGroupsSetIamPolicyCall { c := &NodeGroupsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -98456,7 +102962,7 @@ func (c *NodeGroupsSetIamPolicyCall) Header() http.Header { func (c *NodeGroupsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98582,6 +103088,10 @@ type NodeGroupsSetNodeTemplateCall struct { } // SetNodeTemplate: Updates the node template of the node group. +// +// - nodeGroup: Name of the NodeGroup resource to update. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *NodeGroupsService) SetNodeTemplate(project string, zone string, nodeGroup string, nodegroupssetnodetemplaterequest *NodeGroupsSetNodeTemplateRequest) *NodeGroupsSetNodeTemplateCall { c := &NodeGroupsSetNodeTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -98637,7 +103147,7 @@ func (c *NodeGroupsSetNodeTemplateCall) Header() http.Header { func (c *NodeGroupsSetNodeTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98769,6 +103279,10 @@ type NodeGroupsTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +// - zone: The name of the zone for this request. func (r *NodeGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *NodeGroupsTestIamPermissionsCall { c := &NodeGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -98805,7 +103319,7 @@ func (c *NodeGroupsTestIamPermissionsCall) Header() http.Header { func (c *NodeGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98930,6 +103444,8 @@ type NodeTemplatesAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of node templates. +// +// - project: Project ID for this request. func (r *NodeTemplatesService) AggregatedList(project string) *NodeTemplatesAggregatedListCall { c := &NodeTemplatesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -99017,7 +103533,7 @@ func (c *NodeTemplatesAggregatedListCall) PageToken(pageToken string) *NodeTempl // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *NodeTemplatesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeTemplatesAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -99060,7 +103576,7 @@ func (c *NodeTemplatesAggregatedListCall) Header() http.Header { func (c *NodeTemplatesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99165,7 +103681,7 @@ func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Nod // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -99217,6 +103733,10 @@ type NodeTemplatesDeleteCall struct { } // Delete: Deletes the specified NodeTemplate resource. +// +// - nodeTemplate: Name of the NodeTemplate resource to delete. +// - project: Project ID for this request. +// - region: The name of the region for this request. func (r *NodeTemplatesService) Delete(project string, region string, nodeTemplate string) *NodeTemplatesDeleteCall { c := &NodeTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -99271,7 +103791,7 @@ func (c *NodeTemplatesDeleteCall) Header() http.Header { func (c *NodeTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99395,6 +103915,10 @@ type NodeTemplatesGetCall struct { // Get: Returns the specified node template. Gets a list of available // node templates by making a list() request. +// +// - nodeTemplate: Name of the node template to return. +// - project: Project ID for this request. +// - region: The name of the region for this request. func (r *NodeTemplatesService) Get(project string, region string, nodeTemplate string) *NodeTemplatesGetCall { c := &NodeTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -99440,7 +103964,7 @@ func (c *NodeTemplatesGetCall) Header() http.Header { func (c *NodeTemplatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99563,6 +104087,10 @@ type NodeTemplatesGetIamPolicyCall struct { // GetIamPolicy: Gets the access control policy for a resource. May be // empty if no such policy or resource exists. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. 
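// Illustrative usage sketch (not part of the vendored upstream file): reading
// the IAM policy of a node template with the regional GetIamPolicy call
// defined below. Names are placeholders; Do returns the *compute.Policy
// carried in the response.
func exampleNodeTemplatePolicy(ctx context.Context, svc *compute.Service) (*compute.Policy, error) {
	return svc.NodeTemplates.GetIamPolicy("my-project", "us-central1", "my-node-template").
		Context(ctx).
		Do()
}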
func (r *NodeTemplatesService) GetIamPolicy(project string, region string, resource string) *NodeTemplatesGetIamPolicyCall { c := &NodeTemplatesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -99615,7 +104143,7 @@ func (c *NodeTemplatesGetIamPolicyCall) Header() http.Header { func (c *NodeTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99743,6 +104271,9 @@ type NodeTemplatesInsertCall struct { // Insert: Creates a NodeTemplate resource in the specified project // using the data included in the request. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. func (r *NodeTemplatesService) Insert(project string, region string, nodetemplate *NodeTemplate) *NodeTemplatesInsertCall { c := &NodeTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -99797,7 +104328,7 @@ func (c *NodeTemplatesInsertCall) Header() http.Header { func (c *NodeTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99919,6 +104450,9 @@ type NodeTemplatesListCall struct { // List: Retrieves a list of node templates available to the specified // project. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. func (r *NodeTemplatesService) List(project string, region string) *NodeTemplatesListCall { c := &NodeTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -99994,7 +104528,7 @@ func (c *NodeTemplatesListCall) PageToken(pageToken string) *NodeTemplatesListCa // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *NodeTemplatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeTemplatesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -100037,7 +104571,7 @@ func (c *NodeTemplatesListCall) Header() http.Header { func (c *NodeTemplatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100146,7 +104680,7 @@ func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateL // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" // } @@ -100200,6 +104734,10 @@ type NodeTemplatesSetIamPolicyCall struct { // SetIamPolicy: Sets the access control policy on the specified // resource. Replaces any existing policy. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. func (r *NodeTemplatesService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *NodeTemplatesSetIamPolicyCall { c := &NodeTemplatesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -100236,7 +104774,7 @@ func (c *NodeTemplatesSetIamPolicyCall) Header() http.Header { func (c *NodeTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100363,6 +104901,10 @@ type NodeTemplatesTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. func (r *NodeTemplatesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *NodeTemplatesTestIamPermissionsCall { c := &NodeTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -100399,7 +104941,7 @@ func (c *NodeTemplatesTestIamPermissionsCall) Header() http.Header { func (c *NodeTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100524,6 +105066,8 @@ type NodeTypesAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of node types. +// +// - project: Project ID for this request. func (r *NodeTypesService) AggregatedList(project string) *NodeTypesAggregatedListCall { c := &NodeTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -100611,7 +105155,7 @@ func (c *NodeTypesAggregatedListCall) PageToken(pageToken string) *NodeTypesAggr // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *NodeTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeTypesAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -100654,7 +105198,7 @@ func (c *NodeTypesAggregatedListCall) Header() http.Header { func (c *NodeTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100759,7 +105303,7 @@ func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTyp // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -100813,6 +105357,10 @@ type NodeTypesGetCall struct { // Get: Returns the specified node type. Gets a list of available node // types by making a list() request. +// +// - nodeType: Name of the node type to return. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *NodeTypesService) Get(project string, zone string, nodeType string) *NodeTypesGetCall { c := &NodeTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -100858,7 +105406,7 @@ func (c *NodeTypesGetCall) Header() http.Header { func (c *NodeTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100980,6 +105528,9 @@ type NodeTypesListCall struct { // List: Retrieves a list of node types available to the specified // project. +// +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *NodeTypesService) List(project string, zone string) *NodeTypesListCall { c := &NodeTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -101055,7 +105606,7 @@ func (c *NodeTypesListCall) PageToken(pageToken string) *NodeTypesListCall { // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *NodeTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeTypesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -101098,7 +105649,7 @@ func (c *NodeTypesListCall) Header() http.Header { func (c *NodeTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101200,7 +105751,7 @@ func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, err // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // }, @@ -101258,6 +105809,8 @@ type PacketMirroringsAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of packetMirrorings. +// +// - project: Project ID for this request. func (r *PacketMirroringsService) AggregatedList(project string) *PacketMirroringsAggregatedListCall { c := &PacketMirroringsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -101345,7 +105898,7 @@ func (c *PacketMirroringsAggregatedListCall) PageToken(pageToken string) *Packet // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *PacketMirroringsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PacketMirroringsAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -101388,7 +105941,7 @@ func (c *PacketMirroringsAggregatedListCall) Header() http.Header { func (c *PacketMirroringsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101493,7 +106046,7 @@ func (c *PacketMirroringsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -101545,6 +106098,10 @@ type PacketMirroringsDeleteCall struct { } // Delete: Deletes the specified PacketMirroring resource. +// +// - packetMirroring: Name of the PacketMirroring resource to delete. +// - project: Project ID for this request. +// - region: Name of the region for this request. 
func (r *PacketMirroringsService) Delete(project string, region string, packetMirroring string) *PacketMirroringsDeleteCall { c := &PacketMirroringsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -101599,7 +106156,7 @@ func (c *PacketMirroringsDeleteCall) Header() http.Header { func (c *PacketMirroringsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101722,6 +106279,10 @@ type PacketMirroringsGetCall struct { } // Get: Returns the specified PacketMirroring resource. +// +// - packetMirroring: Name of the PacketMirroring resource to return. +// - project: Project ID for this request. +// - region: Name of the region for this request. func (r *PacketMirroringsService) Get(project string, region string, packetMirroring string) *PacketMirroringsGetCall { c := &PacketMirroringsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -101767,7 +106328,7 @@ func (c *PacketMirroringsGetCall) Header() http.Header { func (c *PacketMirroringsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101889,6 +106450,9 @@ type PacketMirroringsInsertCall struct { // Insert: Creates a PacketMirroring resource in the specified project // and region using the data included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. func (r *PacketMirroringsService) Insert(project string, region string, packetmirroring *PacketMirroring) *PacketMirroringsInsertCall { c := &PacketMirroringsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -101943,7 +106507,7 @@ func (c *PacketMirroringsInsertCall) Header() http.Header { func (c *PacketMirroringsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102065,6 +106629,9 @@ type PacketMirroringsListCall struct { // List: Retrieves a list of PacketMirroring resources available to the // specified project and region. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. func (r *PacketMirroringsService) List(project string, region string) *PacketMirroringsListCall { c := &PacketMirroringsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -102140,7 +106707,7 @@ func (c *PacketMirroringsListCall) PageToken(pageToken string) *PacketMirrorings // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *PacketMirroringsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PacketMirroringsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -102183,7 +106750,7 @@ func (c *PacketMirroringsListCall) Header() http.Header { func (c *PacketMirroringsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102284,27 +106851,1865 @@ func (c *PacketMirroringsListCall) Do(opts ...googleapi.CallOption) (*PacketMirr // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/regions/{region}/packetMirrorings", + // "response": { + // "$ref": "PacketMirroringList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *PacketMirroringsListCall) Pages(ctx context.Context, f func(*PacketMirroringList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.packetMirrorings.patch": + +type PacketMirroringsPatchCall struct { + s *Service + project string + region string + packetMirroring string + packetmirroring *PacketMirroring + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified PacketMirroring resource with the data +// included in the request. This method supports PATCH semantics and +// uses JSON merge patch format and processing rules. +// +// - packetMirroring: Name of the PacketMirroring resource to patch. +// - project: Project ID for this request. +// - region: Name of the region for this request. +func (r *PacketMirroringsService) Patch(project string, region string, packetMirroring string, packetmirroring *PacketMirroring) *PacketMirroringsPatchCall { + c := &PacketMirroringsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.packetMirroring = packetMirroring + c.packetmirroring = packetmirroring + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *PacketMirroringsPatchCall) RequestId(requestId string) *PacketMirroringsPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *PacketMirroringsPatchCall) Fields(s ...googleapi.Field) *PacketMirroringsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *PacketMirroringsPatchCall) Context(ctx context.Context) *PacketMirroringsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *PacketMirroringsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *PacketMirroringsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.packetmirroring) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "packetMirroring": c.packetMirroring, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.packetMirrorings.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *PacketMirroringsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified PacketMirroring resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.packetMirrorings.patch", + // "parameterOrder": [ + // "project", + // "region", + // "packetMirroring" + // ], + // "parameters": { + // "packetMirroring": { + // "description": "Name of the PacketMirroring resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}", + // "request": { + // "$ref": "PacketMirroring" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.packetMirrorings.testIamPermissions": + +type PacketMirroringsTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. 
+func (r *PacketMirroringsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *PacketMirroringsTestIamPermissionsCall { + c := &PacketMirroringsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *PacketMirroringsTestIamPermissionsCall) Fields(s ...googleapi.Field) *PacketMirroringsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *PacketMirroringsTestIamPermissionsCall) Context(ctx context.Context) *PacketMirroringsTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *PacketMirroringsTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *PacketMirroringsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/packetMirrorings/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.packetMirrorings.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *PacketMirroringsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.packetMirrorings.testIamPermissions", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/packetMirrorings/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.projects.disableXpnHost": + +type ProjectsDisableXpnHostCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DisableXpnHost: Disable this project as a shared VPC host project. +// +// - project: Project ID for this request. +func (r *ProjectsService) DisableXpnHost(project string) *ProjectsDisableXpnHostCall { + c := &ProjectsDisableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *ProjectsDisableXpnHostCall) RequestId(requestId string) *ProjectsDisableXpnHostCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsDisableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsDisableXpnHostCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsDisableXpnHostCall) Context(ctx context.Context) *ProjectsDisableXpnHostCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsDisableXpnHostCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsDisableXpnHostCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/disableXpnHost") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.projects.disableXpnHost" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Disable this project as a shared VPC host project.", + // "httpMethod": "POST", + // "id": "compute.projects.disableXpnHost", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/disableXpnHost", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.projects.disableXpnResource": + +type ProjectsDisableXpnResourceCall struct { + s *Service + project string + projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DisableXpnResource: Disable a service resource (also known as service +// project) associated with this host project. +// +// - project: Project ID for this request. +func (r *ProjectsService) DisableXpnResource(project string, projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest) *ProjectsDisableXpnResourceCall { + c := &ProjectsDisableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.projectsdisablexpnresourcerequest = projectsdisablexpnresourcerequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. 
This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsDisableXpnResourceCall) RequestId(requestId string) *ProjectsDisableXpnResourceCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsDisableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsDisableXpnResourceCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsDisableXpnResourceCall) Context(ctx context.Context) *ProjectsDisableXpnResourceCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsDisableXpnResourceCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsDisableXpnResourceCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectsdisablexpnresourcerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/disableXpnResource") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.projects.disableXpnResource" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Disable a service resource (also known as service project) associated with this host project.", + // "httpMethod": "POST", + // "id": "compute.projects.disableXpnResource", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/disableXpnResource", + // "request": { + // "$ref": "ProjectsDisableXpnResourceRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.projects.enableXpnHost": + +type ProjectsEnableXpnHostCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// EnableXpnHost: Enable this project as a shared VPC host project. +// +// - project: Project ID for this request. +func (r *ProjectsService) EnableXpnHost(project string) *ProjectsEnableXpnHostCall { + c := &ProjectsEnableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsEnableXpnHostCall) RequestId(requestId string) *ProjectsEnableXpnHostCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsEnableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsEnableXpnHostCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsEnableXpnHostCall) Context(ctx context.Context) *ProjectsEnableXpnHostCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsEnableXpnHostCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsEnableXpnHostCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/enableXpnHost") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.projects.enableXpnHost" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Enable this project as a shared VPC host project.", + // "httpMethod": "POST", + // "id": "compute.projects.enableXpnHost", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/enableXpnHost", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.projects.enableXpnResource": + +type ProjectsEnableXpnResourceCall struct { + s *Service + project string + projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// EnableXpnResource: Enable service resource (a.k.a service project) +// for a host project, so that subnets in the host project can be used +// by instances in the service project. +// +// - project: Project ID for this request. +func (r *ProjectsService) EnableXpnResource(project string, projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest) *ProjectsEnableXpnResourceCall { + c := &ProjectsEnableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.projectsenablexpnresourcerequest = projectsenablexpnresourcerequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsEnableXpnResourceCall) RequestId(requestId string) *ProjectsEnableXpnResourceCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsEnableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsEnableXpnResourceCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsEnableXpnResourceCall) Context(ctx context.Context) *ProjectsEnableXpnResourceCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsEnableXpnResourceCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsEnableXpnResourceCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectsenablexpnresourcerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/enableXpnResource") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.projects.enableXpnResource" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Enable service resource (a.k.a service project) for a host project, so that subnets in the host project can be used by instances in the service project.", + // "httpMethod": "POST", + // "id": "compute.projects.enableXpnResource", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/enableXpnResource", + // "request": { + // "$ref": "ProjectsEnableXpnResourceRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.projects.get": + +type ProjectsGetCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified Project resource. +// +// - project: Project ID for this request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/get +func (r *ProjectsService) Get(project string) *ProjectsGetCall { + c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *ProjectsGetCall) IfNoneMatch(entityTag string) *ProjectsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsGetCall) Context(ctx context.Context) *ProjectsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.projects.get" call. +// Exactly one of *Project or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Project.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Project{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified Project resource.", + // "httpMethod": "GET", + // "id": "compute.projects.get", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}", + // "response": { + // "$ref": "Project" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.projects.getXpnHost": + +type ProjectsGetXpnHostCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetXpnHost: Gets the shared VPC host project that this project links +// to. May be empty if no link exists. +// +// - project: Project ID for this request. +func (r *ProjectsService) GetXpnHost(project string) *ProjectsGetXpnHostCall { + c := &ProjectsGetXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsGetXpnHostCall) Fields(s ...googleapi.Field) *ProjectsGetXpnHostCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsGetXpnHostCall) IfNoneMatch(entityTag string) *ProjectsGetXpnHostCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsGetXpnHostCall) Context(ctx context.Context) *ProjectsGetXpnHostCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsGetXpnHostCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/getXpnHost") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.projects.getXpnHost" call. +// Exactly one of *Project or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Project.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Project{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the shared VPC host project that this project links to. May be empty if no link exists.", + // "httpMethod": "GET", + // "id": "compute.projects.getXpnHost", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/getXpnHost", + // "response": { + // "$ref": "Project" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.projects.getXpnResources": + +type ProjectsGetXpnResourcesCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetXpnResources: Gets service resources (a.k.a service project) +// associated with this host project. +// +// - project: Project ID for this request. 
+func (r *ProjectsService) GetXpnResources(project string) *ProjectsGetXpnResourcesCall { + c := &ProjectsGetXpnResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *ProjectsGetXpnResourcesCall) Filter(filter string) *ProjectsGetXpnResourcesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *ProjectsGetXpnResourcesCall) MaxResults(maxResults int64) *ProjectsGetXpnResourcesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *ProjectsGetXpnResourcesCall) OrderBy(orderBy string) *ProjectsGetXpnResourcesCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *ProjectsGetXpnResourcesCall) PageToken(pageToken string) *ProjectsGetXpnResourcesCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. 
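// A minimal sketch of GetXpnResources with the MaxResults parameter described
// above: fetch one page of service projects attached to a shared VPC host.
// The Resources field and the XpnResourceId.Id/Type fields are assumed names
// on the generated response types; they are not shown in this hunk.
func listXpnResourcesPage(ctx context.Context, svc *compute.Service, host string) error {
	page, err := svc.Projects.GetXpnResources(host).MaxResults(100).Context(ctx).Do()
	if err != nil {
		return err
	}
	for _, r := range page.Resources {
		fmt.Println(r.Id, r.Type)
	}
	if page.NextPageToken != "" {
		fmt.Println("more results; pass NextPageToken via PageToken or use Pages")
	}
	return nil
}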
+func (c *ProjectsGetXpnResourcesCall) ReturnPartialSuccess(returnPartialSuccess bool) *ProjectsGetXpnResourcesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsGetXpnResourcesCall) Fields(s ...googleapi.Field) *ProjectsGetXpnResourcesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsGetXpnResourcesCall) IfNoneMatch(entityTag string) *ProjectsGetXpnResourcesCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsGetXpnResourcesCall) Context(ctx context.Context) *ProjectsGetXpnResourcesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsGetXpnResourcesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsGetXpnResourcesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/getXpnResources") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.projects.getXpnResources" call. +// Exactly one of *ProjectsGetXpnResources or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ProjectsGetXpnResources.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*ProjectsGetXpnResources, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ProjectsGetXpnResources{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets service resources (a.k.a service project) associated with this host project.", + // "httpMethod": "GET", + // "id": "compute.projects.getXpnResources", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/getXpnResources", + // "response": { + // "$ref": "ProjectsGetXpnResources" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsGetXpnResourcesCall) Pages(ctx context.Context, f func(*ProjectsGetXpnResources) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.projects.listXpnHosts": + +type ProjectsListXpnHostsCall struct { + s *Service + project string + projectslistxpnhostsrequest *ProjectsListXpnHostsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// ListXpnHosts: Lists all shared VPC host projects visible to the user +// in an organization. +// +// - project: Project ID for this request. +func (r *ProjectsService) ListXpnHosts(project string, projectslistxpnhostsrequest *ProjectsListXpnHostsRequest) *ProjectsListXpnHostsCall { + c := &ProjectsListXpnHostsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.projectslistxpnhostsrequest = projectslistxpnhostsrequest + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. 
For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *ProjectsListXpnHostsCall) Filter(filter string) *ProjectsListXpnHostsCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *ProjectsListXpnHostsCall) MaxResults(maxResults int64) *ProjectsListXpnHostsCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *ProjectsListXpnHostsCall) OrderBy(orderBy string) *ProjectsListXpnHostsCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *ProjectsListXpnHostsCall) PageToken(pageToken string) *ProjectsListXpnHostsCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *ProjectsListXpnHostsCall) ReturnPartialSuccess(returnPartialSuccess bool) *ProjectsListXpnHostsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsListXpnHostsCall) Fields(s ...googleapi.Field) *ProjectsListXpnHostsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsListXpnHostsCall) Context(ctx context.Context) *ProjectsListXpnHostsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsListXpnHostsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsListXpnHostsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectslistxpnhostsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/listXpnHosts") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.projects.listXpnHosts" call. +// Exactly one of *XpnHostList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *XpnHostList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &XpnHostList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all shared VPC host projects visible to the user in an organization.", + // "httpMethod": "POST", + // "id": "compute.projects.listXpnHosts", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/packetMirrorings", + // "path": "projects/{project}/listXpnHosts", + // "request": { + // "$ref": "ProjectsListXpnHostsRequest" + // }, // "response": { - // "$ref": "PacketMirroringList" + // "$ref": "XpnHostList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } @@ -102313,7 +108718,7 @@ func (c *PacketMirroringsListCall) Do(opts ...googleapi.CallOption) (*PacketMirr // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
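// A minimal sketch of ListXpnHosts combined with the Pages helper defined
// below it: walk every shared VPC host project visible in the organization.
// Sending an empty ProjectsListXpnHostsRequest and reading page.Items are
// assumptions; only the NextPageToken handling is shown in this hunk.
func listAllXpnHosts(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.Projects.ListXpnHosts(project, &compute.ProjectsListXpnHostsRequest{})
	return call.Pages(ctx, func(page *compute.XpnHostList) error {
		for _, host := range page.Items {
			fmt.Println(host.Name)
		}
		return nil // a non-nil error here would stop the iteration
	})
}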
-func (c *PacketMirroringsListCall) Pages(ctx context.Context, f func(*PacketMirroringList) error) error { +func (c *ProjectsListXpnHostsCall) Pages(ctx context.Context, f func(*XpnHostList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -102331,28 +108736,24 @@ func (c *PacketMirroringsListCall) Pages(ctx context.Context, f func(*PacketMirr } } -// method id "compute.packetMirrorings.patch": +// method id "compute.projects.moveDisk": -type PacketMirroringsPatchCall struct { +type ProjectsMoveDiskCall struct { s *Service project string - region string - packetMirroring string - packetmirroring *PacketMirroring + diskmoverequest *DiskMoveRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Patch: Patches the specified PacketMirroring resource with the data -// included in the request. This method supports PATCH semantics and -// uses JSON merge patch format and processing rules. -func (r *PacketMirroringsService) Patch(project string, region string, packetMirroring string, packetmirroring *PacketMirroring) *PacketMirroringsPatchCall { - c := &PacketMirroringsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// MoveDisk: Moves a persistent disk from one zone to another. +// +// - project: Project ID for this request. +func (r *ProjectsService) MoveDisk(project string, diskmoverequest *DiskMoveRequest) *ProjectsMoveDiskCall { + c := &ProjectsMoveDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.packetMirroring = packetMirroring - c.packetmirroring = packetmirroring + c.diskmoverequest = diskmoverequest return c } @@ -102370,7 +108771,7 @@ func (r *PacketMirroringsService) Patch(project string, region string, packetMir // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *PacketMirroringsPatchCall) RequestId(requestId string) *PacketMirroringsPatchCall { +func (c *ProjectsMoveDiskCall) RequestId(requestId string) *ProjectsMoveDiskCall { c.urlParams_.Set("requestId", requestId) return c } @@ -102378,7 +108779,7 @@ func (c *PacketMirroringsPatchCall) RequestId(requestId string) *PacketMirroring // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *PacketMirroringsPatchCall) Fields(s ...googleapi.Field) *PacketMirroringsPatchCall { +func (c *ProjectsMoveDiskCall) Fields(s ...googleapi.Field) *ProjectsMoveDiskCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -102386,58 +108787,56 @@ func (c *PacketMirroringsPatchCall) Fields(s ...googleapi.Field) *PacketMirrorin // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *PacketMirroringsPatchCall) Context(ctx context.Context) *PacketMirroringsPatchCall { +func (c *ProjectsMoveDiskCall) Context(ctx context.Context) *ProjectsMoveDiskCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *PacketMirroringsPatchCall) Header() http.Header { +func (c *ProjectsMoveDiskCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PacketMirroringsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.packetmirroring) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.diskmoverequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/moveDisk") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "packetMirroring": c.packetMirroring, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.packetMirrorings.patch" call. +// Do executes the "compute.projects.moveDisk" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *PacketMirroringsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -102468,22 +108867,13 @@ func (c *PacketMirroringsPatchCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Patches the specified PacketMirroring resource with the data included in the request. 
This method supports PATCH semantics and uses JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.packetMirrorings.patch", + // "description": "Moves a persistent disk from one zone to another.", + // "httpMethod": "POST", + // "id": "compute.projects.moveDisk", // "parameterOrder": [ - // "project", - // "region", - // "packetMirroring" + // "project" // ], // "parameters": { - // "packetMirroring": { - // "description": "Name of the PacketMirroring resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -102491,22 +108881,15 @@ func (c *PacketMirroringsPatchCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}", + // "path": "projects/{project}/moveDisk", // "request": { - // "$ref": "PacketMirroring" + // "$ref": "DiskMoveRequest" // }, // "response": { // "$ref": "Operation" @@ -102519,34 +108902,51 @@ func (c *PacketMirroringsPatchCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.packetMirrorings.testIamPermissions": +// method id "compute.projects.moveInstance": -type PacketMirroringsTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsMoveInstanceCall struct { + s *Service + project string + instancemoverequest *InstanceMoveRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *PacketMirroringsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *PacketMirroringsTestIamPermissionsCall { - c := &PacketMirroringsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// MoveInstance: Moves an instance and its attached persistent disks +// from one zone to another. +// +// - project: Project ID for this request. 
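// A minimal sketch of the MoveDisk call introduced above: move a persistent
// disk between zones and hand back the long-running Operation. The
// DiskMoveRequest field names, the partial resource URLs, and the literal
// request ID are illustrative assumptions; callers would normally poll the
// returned Operation until it completes.
func moveDisk(ctx context.Context, svc *compute.Service, project string) error {
	req := &compute.DiskMoveRequest{
		TargetDisk:      "zones/us-central1-a/disks/example-disk",
		DestinationZone: "zones/us-central1-b",
	}
	op, err := svc.Projects.MoveDisk(project, req).
		RequestId("3b241101-e2bb-4255-8caf-4136c566a962"). // placeholder idempotency key
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("started operation:", op.Name)
	return nil
}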
+func (r *ProjectsService) MoveInstance(project string, instancemoverequest *InstanceMoveRequest) *ProjectsMoveInstanceCall { + c := &ProjectsMoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.instancemoverequest = instancemoverequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsMoveInstanceCall) RequestId(requestId string) *ProjectsMoveInstanceCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *PacketMirroringsTestIamPermissionsCall) Fields(s ...googleapi.Field) *PacketMirroringsTestIamPermissionsCall { +func (c *ProjectsMoveInstanceCall) Fields(s ...googleapi.Field) *ProjectsMoveInstanceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -102554,36 +108954,36 @@ func (c *PacketMirroringsTestIamPermissionsCall) Fields(s ...googleapi.Field) *P // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *PacketMirroringsTestIamPermissionsCall) Context(ctx context.Context) *PacketMirroringsTestIamPermissionsCall { +func (c *ProjectsMoveInstanceCall) Context(ctx context.Context) *ProjectsMoveInstanceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
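// A minimal sketch of the companion MoveInstance call set up above: relocate
// an instance and its attached persistent disks to another zone. The
// InstanceMoveRequest field names and resource URLs are illustrative
// assumptions, as in the MoveDisk sketch.
func moveInstance(ctx context.Context, svc *compute.Service, project string) error {
	req := &compute.InstanceMoveRequest{
		TargetInstance:  "zones/us-central1-a/instances/example-instance",
		DestinationZone: "zones/us-central1-b",
	}
	op, err := svc.Projects.MoveInstance(project, req).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("started operation:", op.Name)
	return nil
}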
-func (c *PacketMirroringsTestIamPermissionsCall) Header() http.Header { +func (c *ProjectsMoveInstanceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PacketMirroringsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancemoverequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/packetMirrorings/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/moveInstance") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -102591,21 +108991,19 @@ func (c *PacketMirroringsTestIamPermissionsCall) doRequest(alt string) (*http.Re } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.packetMirrorings.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *PacketMirroringsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.projects.moveInstance" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -102624,7 +109022,7 @@ func (c *PacketMirroringsTestIamPermissionsCall) Do(opts ...googleapi.CallOption if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -102636,13 +109034,11 @@ func (c *PacketMirroringsTestIamPermissionsCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Moves an instance and its attached persistent disks from one zone to another.", // "httpMethod": "POST", - // "id": "compute.packetMirrorings.testIamPermissions", + // "id": "compute.projects.moveInstance", // "parameterOrder": [ - // "project", - // "region", - // "resource" + // "project" // ], // "parameters": { // "project": { @@ -102652,51 +109048,47 @@ func (c *PacketMirroringsTestIamPermissionsCall) Do(opts ...googleapi.CallOption // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/packetMirrorings/{resource}/testIamPermissions", + // "path": "projects/{project}/moveInstance", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "InstanceMoveRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.projects.disableXpnHost": +// method id "compute.projects.setCommonInstanceMetadata": -type ProjectsDisableXpnHostCall struct { +type ProjectsSetCommonInstanceMetadataCall struct { s *Service project string + metadata *Metadata urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// DisableXpnHost: Disable this project as a shared VPC host project. 
-func (r *ProjectsService) DisableXpnHost(project string) *ProjectsDisableXpnHostCall { - c := &ProjectsDisableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetCommonInstanceMetadata: Sets metadata common to all instances +// within the specified project using the data included in the request. +// +// - project: Project ID for this request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setCommonInstanceMetadata +func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Metadata) *ProjectsSetCommonInstanceMetadataCall { + c := &ProjectsSetCommonInstanceMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.metadata = metadata return c } @@ -102714,7 +109106,7 @@ func (r *ProjectsService) DisableXpnHost(project string) *ProjectsDisableXpnHost // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsDisableXpnHostCall) RequestId(requestId string) *ProjectsDisableXpnHostCall { +func (c *ProjectsSetCommonInstanceMetadataCall) RequestId(requestId string) *ProjectsSetCommonInstanceMetadataCall { c.urlParams_.Set("requestId", requestId) return c } @@ -102722,7 +109114,7 @@ func (c *ProjectsDisableXpnHostCall) RequestId(requestId string) *ProjectsDisabl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsDisableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsDisableXpnHostCall { +func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *ProjectsSetCommonInstanceMetadataCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -102730,31 +109122,36 @@ func (c *ProjectsDisableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsDisab // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsDisableXpnHostCall) Context(ctx context.Context) *ProjectsDisableXpnHostCall { +func (c *ProjectsSetCommonInstanceMetadataCall) Context(ctx context.Context) *ProjectsSetCommonInstanceMetadataCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsDisableXpnHostCall) Header() http.Header { +func (c *ProjectsSetCommonInstanceMetadataCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsDisableXpnHostCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/disableXpnHost") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setCommonInstanceMetadata") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -102767,14 +109164,14 @@ func (c *ProjectsDisableXpnHostCall) doRequest(alt string) (*http.Response, erro return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.disableXpnHost" call. +// Do executes the "compute.projects.setCommonInstanceMetadata" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -102805,9 +109202,9 @@ func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Disable this project as a shared VPC host project.", + // "description": "Sets metadata common to all instances within the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.projects.disableXpnHost", + // "id": "compute.projects.setCommonInstanceMetadata", // "parameterOrder": [ // "project" // ], @@ -102825,7 +109222,10 @@ func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // } // }, - // "path": "projects/{project}/disableXpnHost", + // "path": "projects/{project}/setCommonInstanceMetadata", + // "request": { + // "$ref": "Metadata" + // }, // "response": { // "$ref": "Operation" // }, @@ -102837,23 +109237,27 @@ func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.projects.disableXpnResource": +// method id "compute.projects.setDefaultNetworkTier": -type ProjectsDisableXpnResourceCall struct { - s *Service - project string - projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsSetDefaultNetworkTierCall struct { + s *Service + project string + projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DisableXpnResource: Disable a service resource (also known as service -// project) associated with this host project. -func (r *ProjectsService) DisableXpnResource(project string, projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest) *ProjectsDisableXpnResourceCall { - c := &ProjectsDisableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetDefaultNetworkTier: Sets the default network tier of the project. +// The default network tier is used when an +// address/forwardingRule/instance is created without specifying the +// network tier field. +// +// - project: Project ID for this request. 
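// A minimal sketch of the SetCommonInstanceMetadata call shown above: replace
// the project-wide instance metadata. MetadataItems.Value being a *string and
// the omission of the Fingerprint (which the API normally expects from a
// prior Projects.Get) are assumptions made to keep the sketch short.
func setCommonMetadata(ctx context.Context, svc *compute.Service, project string) error {
	enabled := "TRUE"
	md := &compute.Metadata{
		Items: []*compute.MetadataItems{
			{Key: "enable-oslogin", Value: &enabled},
		},
	}
	op, err := svc.Projects.SetCommonInstanceMetadata(project, md).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("started operation:", op.Name)
	return nil
}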
+func (r *ProjectsService) SetDefaultNetworkTier(project string, projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest) *ProjectsSetDefaultNetworkTierCall { + c := &ProjectsSetDefaultNetworkTierCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.projectsdisablexpnresourcerequest = projectsdisablexpnresourcerequest + c.projectssetdefaultnetworktierrequest = projectssetdefaultnetworktierrequest return c } @@ -102871,7 +109275,7 @@ func (r *ProjectsService) DisableXpnResource(project string, projectsdisablexpnr // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsDisableXpnResourceCall) RequestId(requestId string) *ProjectsDisableXpnResourceCall { +func (c *ProjectsSetDefaultNetworkTierCall) RequestId(requestId string) *ProjectsSetDefaultNetworkTierCall { c.urlParams_.Set("requestId", requestId) return c } @@ -102879,7 +109283,7 @@ func (c *ProjectsDisableXpnResourceCall) RequestId(requestId string) *ProjectsDi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsDisableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsDisableXpnResourceCall { +func (c *ProjectsSetDefaultNetworkTierCall) Fields(s ...googleapi.Field) *ProjectsSetDefaultNetworkTierCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -102887,36 +109291,36 @@ func (c *ProjectsDisableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsD // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsDisableXpnResourceCall) Context(ctx context.Context) *ProjectsDisableXpnResourceCall { +func (c *ProjectsSetDefaultNetworkTierCall) Context(ctx context.Context) *ProjectsSetDefaultNetworkTierCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsDisableXpnResourceCall) Header() http.Header { +func (c *ProjectsSetDefaultNetworkTierCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsDisableXpnResourceCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsSetDefaultNetworkTierCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectsdisablexpnresourcerequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectssetdefaultnetworktierrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/disableXpnResource") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setDefaultNetworkTier") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -102929,14 +109333,14 @@ func (c *ProjectsDisableXpnResourceCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.disableXpnResource" call. +// Do executes the "compute.projects.setDefaultNetworkTier" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -102967,9 +109371,9 @@ func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Disable a service resource (also known as service project) associated with this host project.", + // "description": "Sets the default network tier of the project. The default network tier is used when an address/forwardingRule/instance is created without specifying the network tier field.", // "httpMethod": "POST", - // "id": "compute.projects.disableXpnResource", + // "id": "compute.projects.setDefaultNetworkTier", // "parameterOrder": [ // "project" // ], @@ -102987,9 +109391,9 @@ func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // } // }, - // "path": "projects/{project}/disableXpnResource", + // "path": "projects/{project}/setDefaultNetworkTier", // "request": { - // "$ref": "ProjectsDisableXpnResourceRequest" + // "$ref": "ProjectsSetDefaultNetworkTierRequest" // }, // "response": { // "$ref": "Operation" @@ -103002,20 +109406,28 @@ func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.projects.enableXpnHost": +// method id "compute.projects.setUsageExportBucket": -type ProjectsEnableXpnHostCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsSetUsageExportBucketCall struct { + s *Service + project string + usageexportlocation *UsageExportLocation + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// EnableXpnHost: Enable this project as a shared VPC host project. -func (r *ProjectsService) EnableXpnHost(project string) *ProjectsEnableXpnHostCall { - c := &ProjectsEnableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetUsageExportBucket: Enables the usage export feature and sets the +// usage export bucket where reports are stored. If you provide an empty +// request body using this method, the usage export feature will be +// disabled. +// +// - project: Project ID for this request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setUsageExportBucket +func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocation *UsageExportLocation) *ProjectsSetUsageExportBucketCall { + c := &ProjectsSetUsageExportBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.usageexportlocation = usageexportlocation return c } @@ -103033,7 +109445,7 @@ func (r *ProjectsService) EnableXpnHost(project string) *ProjectsEnableXpnHostCa // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsEnableXpnHostCall) RequestId(requestId string) *ProjectsEnableXpnHostCall { +func (c *ProjectsSetUsageExportBucketCall) RequestId(requestId string) *ProjectsSetUsageExportBucketCall { c.urlParams_.Set("requestId", requestId) return c } @@ -103041,7 +109453,7 @@ func (c *ProjectsEnableXpnHostCall) RequestId(requestId string) *ProjectsEnableX // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsEnableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsEnableXpnHostCall { +func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *ProjectsSetUsageExportBucketCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -103049,31 +109461,36 @@ func (c *ProjectsEnableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsEnable // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsEnableXpnHostCall) Context(ctx context.Context) *ProjectsEnableXpnHostCall { +func (c *ProjectsSetUsageExportBucketCall) Context(ctx context.Context) *ProjectsSetUsageExportBucketCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsEnableXpnHostCall) Header() http.Header { +func (c *ProjectsSetUsageExportBucketCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsEnableXpnHostCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.usageexportlocation) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/enableXpnHost") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setUsageExportBucket") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -103086,14 +109503,14 @@ func (c *ProjectsEnableXpnHostCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.enableXpnHost" call. +// Do executes the "compute.projects.setUsageExportBucket" call. 
// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -103124,9 +109541,9 @@ func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Enable this project as a shared VPC host project.", + // "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled.", // "httpMethod": "POST", - // "id": "compute.projects.enableXpnHost", + // "id": "compute.projects.setUsageExportBucket", // "parameterOrder": [ // "project" // ], @@ -103144,36 +109561,44 @@ func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation // "type": "string" // } // }, - // "path": "projects/{project}/enableXpnHost", + // "path": "projects/{project}/setUsageExportBucket", + // "request": { + // "$ref": "UsageExportLocation" + // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "compute.projects.enableXpnResource": +// method id "compute.publicAdvertisedPrefixes.delete": -type ProjectsEnableXpnResourceCall struct { - s *Service - project string - projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type PublicAdvertisedPrefixesDeleteCall struct { + s *Service + project string + publicAdvertisedPrefix string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// EnableXpnResource: Enable service resource (a.k.a service project) -// for a host project, so that subnets in the host project can be used -// by instances in the service project. -func (r *ProjectsService) EnableXpnResource(project string, projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest) *ProjectsEnableXpnResourceCall { - c := &ProjectsEnableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified PublicAdvertisedPrefix +// +// - project: Project ID for this request. +// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource +// to delete. 
+func (r *PublicAdvertisedPrefixesService) Delete(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesDeleteCall { + c := &PublicAdvertisedPrefixesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.projectsenablexpnresourcerequest = projectsenablexpnresourcerequest + c.publicAdvertisedPrefix = publicAdvertisedPrefix return c } @@ -103191,7 +109616,7 @@ func (r *ProjectsService) EnableXpnResource(project string, projectsenablexpnres // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsEnableXpnResourceCall) RequestId(requestId string) *ProjectsEnableXpnResourceCall { +func (c *PublicAdvertisedPrefixesDeleteCall) RequestId(requestId string) *PublicAdvertisedPrefixesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -103199,7 +109624,7 @@ func (c *ProjectsEnableXpnResourceCall) RequestId(requestId string) *ProjectsEna // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsEnableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsEnableXpnResourceCall { +func (c *PublicAdvertisedPrefixesDeleteCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -103207,56 +109632,52 @@ func (c *ProjectsEnableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsEn // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsEnableXpnResourceCall) Context(ctx context.Context) *ProjectsEnableXpnResourceCall { +func (c *PublicAdvertisedPrefixesDeleteCall) Context(ctx context.Context) *PublicAdvertisedPrefixesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsEnableXpnResourceCall) Header() http.Header { +func (c *PublicAdvertisedPrefixesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsEnableXpnResourceCall) doRequest(alt string) (*http.Response, error) { +func (c *PublicAdvertisedPrefixesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectsenablexpnresourcerequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/enableXpnResource") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "publicAdvertisedPrefix": c.publicAdvertisedPrefix, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.enableXpnResource" call. +// Do executes the "compute.publicAdvertisedPrefixes.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *PublicAdvertisedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -103287,11 +109708,12 @@ func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Enable service resource (a.k.a service project) for a host project, so that subnets in the host project can be used by instances in the service project.", - // "httpMethod": "POST", - // "id": "compute.projects.enableXpnResource", + // "description": "Deletes the specified PublicAdvertisedPrefix", + // "httpMethod": "DELETE", + // "id": "compute.publicAdvertisedPrefixes.delete", // "parameterOrder": [ - // "project" + // "project", + // "publicAdvertisedPrefix" // ], // "parameters": { // "project": { @@ -103301,16 +109723,20 @@ func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Opera // "required": true, // "type": "string" // }, + // "publicAdvertisedPrefix": { + // "description": "Name of the PublicAdvertisedPrefix resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/enableXpnResource", - // "request": { - // "$ref": "ProjectsEnableXpnResourceRequest" - // }, + // "path": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", // "response": { // "$ref": "Operation" // }, @@ -103322,29 +109748,34 @@ func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Opera } -// method id "compute.projects.get": +// method id "compute.publicAdvertisedPrefixes.get": -type ProjectsGetCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type PublicAdvertisedPrefixesGetCall struct { + s *Service + project string + publicAdvertisedPrefix string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified Project resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/get -func (r *ProjectsService) Get(project string) *ProjectsGetCall { - c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified PublicAdvertisedPrefix resource. +// +// - project: Project ID for this request. +// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource +// to return. +func (r *PublicAdvertisedPrefixesService) Get(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesGetCall { + c := &PublicAdvertisedPrefixesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.publicAdvertisedPrefix = publicAdvertisedPrefix return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall { +func (c *PublicAdvertisedPrefixesGetCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -103354,7 +109785,7 @@ func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ProjectsGetCall) IfNoneMatch(entityTag string) *ProjectsGetCall { +func (c *PublicAdvertisedPrefixesGetCall) IfNoneMatch(entityTag string) *PublicAdvertisedPrefixesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -103362,23 +109793,23 @@ func (c *ProjectsGetCall) IfNoneMatch(entityTag string) *ProjectsGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsGetCall) Context(ctx context.Context) *ProjectsGetCall { +func (c *PublicAdvertisedPrefixesGetCall) Context(ctx context.Context) *PublicAdvertisedPrefixesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsGetCall) Header() http.Header { +func (c *PublicAdvertisedPrefixesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *PublicAdvertisedPrefixesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103389,7 +109820,7 @@ func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -103397,19 +109828,20 @@ func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "publicAdvertisedPrefix": c.publicAdvertisedPrefix, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.get" call. -// Exactly one of *Project or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Project.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { +// Do executes the "compute.publicAdvertisedPrefixes.get" call. +// Exactly one of *PublicAdvertisedPrefix or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *PublicAdvertisedPrefix.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *PublicAdvertisedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*PublicAdvertisedPrefix, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -103428,7 +109860,7 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Project{ + ret := &PublicAdvertisedPrefix{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -103440,11 +109872,12 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { } return ret, nil // { - // "description": "Returns the specified Project resource.", + // "description": "Returns the specified PublicAdvertisedPrefix resource.", // "httpMethod": "GET", - // "id": "compute.projects.get", + // "id": "compute.publicAdvertisedPrefixes.get", // "parameterOrder": [ - // "project" + // "project", + // "publicAdvertisedPrefix" // ], // "parameters": { // "project": { @@ -103453,11 +109886,18 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "publicAdvertisedPrefix": { + // "description": "Name of the PublicAdvertisedPrefix resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}", + // "path": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", // "response": { - // "$ref": "Project" + // "$ref": "PublicAdvertisedPrefix" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -103468,76 +109908,90 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { } -// method id "compute.projects.getXpnHost": +// method id "compute.publicAdvertisedPrefixes.insert": -type ProjectsGetXpnHostCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type PublicAdvertisedPrefixesInsertCall struct { + s *Service + project string + publicadvertisedprefix *PublicAdvertisedPrefix + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetXpnHost: Gets the shared VPC host project that this project links -// to. May be empty if no link exists. -func (r *ProjectsService) GetXpnHost(project string) *ProjectsGetXpnHostCall { - c := &ProjectsGetXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a PublicAdvertisedPrefix in the specified project +// using the parameters that are included in the request. +// +// - project: Project ID for this request. +func (r *PublicAdvertisedPrefixesService) Insert(project string, publicadvertisedprefix *PublicAdvertisedPrefix) *PublicAdvertisedPrefixesInsertCall { + c := &PublicAdvertisedPrefixesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.publicadvertisedprefix = publicadvertisedprefix + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *PublicAdvertisedPrefixesInsertCall) RequestId(requestId string) *PublicAdvertisedPrefixesInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsGetXpnHostCall) Fields(s ...googleapi.Field) *ProjectsGetXpnHostCall { +func (c *PublicAdvertisedPrefixesInsertCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsGetXpnHostCall) IfNoneMatch(entityTag string) *ProjectsGetXpnHostCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsGetXpnHostCall) Context(ctx context.Context) *ProjectsGetXpnHostCall { +func (c *PublicAdvertisedPrefixesInsertCall) Context(ctx context.Context) *PublicAdvertisedPrefixesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsGetXpnHostCall) Header() http.Header { +func (c *PublicAdvertisedPrefixesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) { +func (c *PublicAdvertisedPrefixesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicadvertisedprefix) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/getXpnHost") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -103548,14 +110002,14 @@ func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.getXpnHost" call. 
-// Exactly one of *Project or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Project.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, error) { +// Do executes the "compute.publicAdvertisedPrefixes.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *PublicAdvertisedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -103574,7 +110028,7 @@ func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Project{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -103586,9 +110040,9 @@ func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, err } return ret, nil // { - // "description": "Gets the shared VPC host project that this project links to. May be empty if no link exists.", - // "httpMethod": "GET", - // "id": "compute.projects.getXpnHost", + // "description": "Creates a PublicAdvertisedPrefix in the specified project using the parameters that are included in the request.", + // "httpMethod": "POST", + // "id": "compute.publicAdvertisedPrefixes.insert", // "parameterOrder": [ // "project" // ], @@ -103599,11 +110053,19 @@ func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, err // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "projects/{project}/getXpnHost", + // "path": "projects/{project}/global/publicAdvertisedPrefixes", + // "request": { + // "$ref": "PublicAdvertisedPrefix" + // }, // "response": { - // "$ref": "Project" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -103613,9 +110075,9 @@ func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, err } -// method id "compute.projects.getXpnResources": +// method id "compute.publicAdvertisedPrefixes.list": -type ProjectsGetXpnResourcesCall struct { +type PublicAdvertisedPrefixesListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -103624,10 +110086,11 @@ type ProjectsGetXpnResourcesCall struct { header_ http.Header } -// GetXpnResources: Gets service resources (a.k.a service project) -// associated with this host project. -func (r *ProjectsService) GetXpnResources(project string) *ProjectsGetXpnResourcesCall { - c := &ProjectsGetXpnResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Lists the PublicAdvertisedPrefixes for a project. +// +// - project: Project ID for this request. +func (r *PublicAdvertisedPrefixesService) List(project string) *PublicAdvertisedPrefixesListCall { + c := &PublicAdvertisedPrefixesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -103655,7 +110118,7 @@ func (r *ProjectsService) GetXpnResources(project string) *ProjectsGetXpnResourc // expressions explicitly. For example: ``` (cpuPlatform = "Intel // Skylake") OR (cpuPlatform = "Intel Broadwell") AND // (scheduling.automaticRestart = true) ``` -func (c *ProjectsGetXpnResourcesCall) Filter(filter string) *ProjectsGetXpnResourcesCall { +func (c *PublicAdvertisedPrefixesListCall) Filter(filter string) *PublicAdvertisedPrefixesListCall { c.urlParams_.Set("filter", filter) return c } @@ -103666,7 +110129,7 @@ func (c *ProjectsGetXpnResourcesCall) Filter(filter string) *ProjectsGetXpnResou // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *ProjectsGetXpnResourcesCall) MaxResults(maxResults int64) *ProjectsGetXpnResourcesCall { +func (c *PublicAdvertisedPrefixesListCall) MaxResults(maxResults int64) *PublicAdvertisedPrefixesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -103684,7 +110147,7 @@ func (c *ProjectsGetXpnResourcesCall) MaxResults(maxResults int64) *ProjectsGetX // // Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *ProjectsGetXpnResourcesCall) OrderBy(orderBy string) *ProjectsGetXpnResourcesCall { +func (c *PublicAdvertisedPrefixesListCall) OrderBy(orderBy string) *PublicAdvertisedPrefixesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -103692,7 +110155,7 @@ func (c *ProjectsGetXpnResourcesCall) OrderBy(orderBy string) *ProjectsGetXpnRes // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
-func (c *ProjectsGetXpnResourcesCall) PageToken(pageToken string) *ProjectsGetXpnResourcesCall { +func (c *PublicAdvertisedPrefixesListCall) PageToken(pageToken string) *PublicAdvertisedPrefixesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -103700,8 +110163,8 @@ func (c *ProjectsGetXpnResourcesCall) PageToken(pageToken string) *ProjectsGetXp // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. -func (c *ProjectsGetXpnResourcesCall) ReturnPartialSuccess(returnPartialSuccess bool) *ProjectsGetXpnResourcesCall { +// false. +func (c *PublicAdvertisedPrefixesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PublicAdvertisedPrefixesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -103709,7 +110172,7 @@ func (c *ProjectsGetXpnResourcesCall) ReturnPartialSuccess(returnPartialSuccess // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsGetXpnResourcesCall) Fields(s ...googleapi.Field) *ProjectsGetXpnResourcesCall { +func (c *PublicAdvertisedPrefixesListCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -103719,7 +110182,7 @@ func (c *ProjectsGetXpnResourcesCall) Fields(s ...googleapi.Field) *ProjectsGetX // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ProjectsGetXpnResourcesCall) IfNoneMatch(entityTag string) *ProjectsGetXpnResourcesCall { +func (c *PublicAdvertisedPrefixesListCall) IfNoneMatch(entityTag string) *PublicAdvertisedPrefixesListCall { c.ifNoneMatch_ = entityTag return c } @@ -103727,23 +110190,23 @@ func (c *ProjectsGetXpnResourcesCall) IfNoneMatch(entityTag string) *ProjectsGet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsGetXpnResourcesCall) Context(ctx context.Context) *ProjectsGetXpnResourcesCall { +func (c *PublicAdvertisedPrefixesListCall) Context(ctx context.Context) *PublicAdvertisedPrefixesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsGetXpnResourcesCall) Header() http.Header { +func (c *PublicAdvertisedPrefixesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsGetXpnResourcesCall) doRequest(alt string) (*http.Response, error) { +func (c *PublicAdvertisedPrefixesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103754,7 +110217,7 @@ func (c *ProjectsGetXpnResourcesCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/getXpnResources") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -103767,14 +110230,14 @@ func (c *ProjectsGetXpnResourcesCall) doRequest(alt string) (*http.Response, err return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.getXpnResources" call. -// Exactly one of *ProjectsGetXpnResources or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ProjectsGetXpnResources.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.publicAdvertisedPrefixes.list" call. +// Exactly one of *PublicAdvertisedPrefixList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *PublicAdvertisedPrefixList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*ProjectsGetXpnResources, error) { +func (c *PublicAdvertisedPrefixesListCall) Do(opts ...googleapi.CallOption) (*PublicAdvertisedPrefixList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -103793,7 +110256,7 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ProjectsGetXpnResources{ + ret := &PublicAdvertisedPrefixList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -103805,9 +110268,9 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project } return ret, nil // { - // "description": "Gets service resources (a.k.a service project) associated with this host project.", + // "description": "Lists the PublicAdvertisedPrefixes for a project.", // "httpMethod": "GET", - // "id": "compute.projects.getXpnResources", + // "id": "compute.publicAdvertisedPrefixes.list", // "parameterOrder": [ // "project" // ], @@ -103843,18 +110306,19 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } // }, - // "path": "projects/{project}/getXpnResources", + // "path": "projects/{project}/global/publicAdvertisedPrefixes", // "response": { - // "$ref": "ProjectsGetXpnResources" + // "$ref": "PublicAdvertisedPrefixList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } @@ -103863,7 +110327,7 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *ProjectsGetXpnResourcesCall) Pages(ctx context.Context, f func(*ProjectsGetXpnResources) error) error { +func (c *PublicAdvertisedPrefixesListCall) Pages(ctx context.Context, f func(*PublicAdvertisedPrefixList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -103881,23 +110345,205 @@ func (c *ProjectsGetXpnResourcesCall) Pages(ctx context.Context, f func(*Project } } -// method id "compute.projects.listXpnHosts": +// method id "compute.publicAdvertisedPrefixes.patch": -type ProjectsListXpnHostsCall struct { - s *Service - project string - projectslistxpnhostsrequest *ProjectsListXpnHostsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type PublicAdvertisedPrefixesPatchCall struct { + s *Service + project string + publicAdvertisedPrefix string + publicadvertisedprefix *PublicAdvertisedPrefix + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListXpnHosts: Lists all shared VPC host projects visible to the user -// in an organization. -func (r *ProjectsService) ListXpnHosts(project string, projectslistxpnhostsrequest *ProjectsListXpnHostsRequest) *ProjectsListXpnHostsCall { - c := &ProjectsListXpnHostsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Patches the specified Router resource with the data included +// in the request. This method supports PATCH semantics and uses JSON +// merge patch format and processing rules. +// +// - project: Project ID for this request. +// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource +// to patch. +func (r *PublicAdvertisedPrefixesService) Patch(project string, publicAdvertisedPrefix string, publicadvertisedprefix *PublicAdvertisedPrefix) *PublicAdvertisedPrefixesPatchCall { + c := &PublicAdvertisedPrefixesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.publicAdvertisedPrefix = publicAdvertisedPrefix + c.publicadvertisedprefix = publicadvertisedprefix + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *PublicAdvertisedPrefixesPatchCall) RequestId(requestId string) *PublicAdvertisedPrefixesPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *PublicAdvertisedPrefixesPatchCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *PublicAdvertisedPrefixesPatchCall) Context(ctx context.Context) *PublicAdvertisedPrefixesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *PublicAdvertisedPrefixesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *PublicAdvertisedPrefixesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicadvertisedprefix) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "publicAdvertisedPrefix": c.publicAdvertisedPrefix, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.publicAdvertisedPrefixes.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *PublicAdvertisedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified Router resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.publicAdvertisedPrefixes.patch", + // "parameterOrder": [ + // "project", + // "publicAdvertisedPrefix" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "publicAdvertisedPrefix": { + // "description": "Name of the PublicAdvertisedPrefix resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", + // "request": { + // "$ref": "PublicAdvertisedPrefix" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.publicDelegatedPrefixes.aggregatedList": + +type PublicDelegatedPrefixesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Lists all PublicDelegatedPrefix resources owned by +// the specific project across all scopes. +// +// - project: Name of the project scoping this request. 
+func (r *PublicDelegatedPrefixesService) AggregatedList(project string) *PublicDelegatedPrefixesAggregatedListCall { + c := &PublicDelegatedPrefixesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.projectslistxpnhostsrequest = projectslistxpnhostsrequest return c } @@ -103924,18 +110570,31 @@ func (r *ProjectsService) ListXpnHosts(project string, projectslistxpnhostsreque // expressions explicitly. For example: ``` (cpuPlatform = "Intel // Skylake") OR (cpuPlatform = "Intel Broadwell") AND // (scheduling.automaticRestart = true) ``` -func (c *ProjectsListXpnHostsCall) Filter(filter string) *ProjectsListXpnHostsCall { +func (c *PublicDelegatedPrefixesAggregatedListCall) Filter(filter string) *PublicDelegatedPrefixesAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *PublicDelegatedPrefixesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *PublicDelegatedPrefixesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of // available results is larger than `maxResults`, Compute Engine returns // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *ProjectsListXpnHostsCall) MaxResults(maxResults int64) *ProjectsListXpnHostsCall { +func (c *PublicDelegatedPrefixesAggregatedListCall) MaxResults(maxResults int64) *PublicDelegatedPrefixesAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -103953,7 +110612,7 @@ func (c *ProjectsListXpnHostsCall) MaxResults(maxResults int64) *ProjectsListXpn // // Currently, only sorting by `name` or `creationTimestamp desc` is // supported. -func (c *ProjectsListXpnHostsCall) OrderBy(orderBy string) *ProjectsListXpnHostsCall { +func (c *PublicDelegatedPrefixesAggregatedListCall) OrderBy(orderBy string) *PublicDelegatedPrefixesAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -103961,7 +110620,7 @@ func (c *ProjectsListXpnHostsCall) OrderBy(orderBy string) *ProjectsListXpnHosts // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
-func (c *ProjectsListXpnHostsCall) PageToken(pageToken string) *ProjectsListXpnHostsCall { +func (c *PublicDelegatedPrefixesAggregatedListCall) PageToken(pageToken string) *PublicDelegatedPrefixesAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -103969,8 +110628,8 @@ func (c *ProjectsListXpnHostsCall) PageToken(pageToken string) *ProjectsListXpnH // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. -func (c *ProjectsListXpnHostsCall) ReturnPartialSuccess(returnPartialSuccess bool) *ProjectsListXpnHostsCall { +// false. +func (c *PublicDelegatedPrefixesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PublicDelegatedPrefixesAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -103978,46 +110637,54 @@ func (c *ProjectsListXpnHostsCall) ReturnPartialSuccess(returnPartialSuccess boo // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsListXpnHostsCall) Fields(s ...googleapi.Field) *ProjectsListXpnHostsCall { +func (c *PublicDelegatedPrefixesAggregatedListCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *PublicDelegatedPrefixesAggregatedListCall) IfNoneMatch(entityTag string) *PublicDelegatedPrefixesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsListXpnHostsCall) Context(ctx context.Context) *ProjectsListXpnHostsCall { +func (c *PublicDelegatedPrefixesAggregatedListCall) Context(ctx context.Context) *PublicDelegatedPrefixesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsListXpnHostsCall) Header() http.Header { +func (c *PublicDelegatedPrefixesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsListXpnHostsCall) doRequest(alt string) (*http.Response, error) { +func (c *PublicDelegatedPrefixesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectslistxpnhostsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/listXpnHosts") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/publicDelegatedPrefixes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -104028,14 +110695,15 @@ func (c *ProjectsListXpnHostsCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.listXpnHosts" call. -// Exactly one of *XpnHostList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *XpnHostList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostList, error) { +// Do executes the "compute.publicDelegatedPrefixes.aggregatedList" call. +// Exactly one of *PublicDelegatedPrefixAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *PublicDelegatedPrefixAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *PublicDelegatedPrefixesAggregatedListCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefixAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -104054,7 +110722,7 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &XpnHostList{ + ret := &PublicDelegatedPrefixAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -104066,9 +110734,9 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis } return ret, nil // { - // "description": "Lists all shared VPC host projects visible to the user in an organization.", - // "httpMethod": "POST", - // "id": "compute.projects.listXpnHosts", + // "description": "Lists all PublicDelegatedPrefix resources owned by the specific project across all scopes.", + // "httpMethod": "GET", + // "id": "compute.publicDelegatedPrefixes.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -104078,6 +110746,11 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -104097,28 +110770,26 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis // "type": "string" // }, // "project": { - // "description": "Project ID for this request.", + // "description": "Name of the project scoping this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" // } // }, - // "path": "projects/{project}/listXpnHosts", - // "request": { - // "$ref": "ProjectsListXpnHostsRequest" - // }, + // "path": "projects/{project}/aggregated/publicDelegatedPrefixes", // "response": { - // "$ref": "XpnHostList" + // "$ref": "PublicDelegatedPrefixAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } @@ -104127,7 +110798,7 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *ProjectsListXpnHostsCall) Pages(ctx context.Context, f func(*XpnHostList) error) error { +func (c *PublicDelegatedPrefixesAggregatedListCall) Pages(ctx context.Context, f func(*PublicDelegatedPrefixAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -104145,22 +110816,30 @@ func (c *ProjectsListXpnHostsCall) Pages(ctx context.Context, f func(*XpnHostLis } } -// method id "compute.projects.moveDisk": +// method id "compute.publicDelegatedPrefixes.delete": -type ProjectsMoveDiskCall struct { - s *Service - project string - diskmoverequest *DiskMoveRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type PublicDelegatedPrefixesDeleteCall struct { + s *Service + project string + region string + publicDelegatedPrefix string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// MoveDisk: Moves a persistent disk from one zone to another. -func (r *ProjectsService) MoveDisk(project string, diskmoverequest *DiskMoveRequest) *ProjectsMoveDiskCall { - c := &ProjectsMoveDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified PublicDelegatedPrefix in the given +// region. +// +// - project: Project ID for this request. +// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource +// to delete. +// - region: Name of the region of this request. +func (r *PublicDelegatedPrefixesService) Delete(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesDeleteCall { + c := &PublicDelegatedPrefixesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.diskmoverequest = diskmoverequest + c.region = region + c.publicDelegatedPrefix = publicDelegatedPrefix return c } @@ -104178,7 +110857,7 @@ func (r *ProjectsService) MoveDisk(project string, diskmoverequest *DiskMoveRequ // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsMoveDiskCall) RequestId(requestId string) *ProjectsMoveDiskCall { +func (c *PublicDelegatedPrefixesDeleteCall) RequestId(requestId string) *PublicDelegatedPrefixesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -104186,7 +110865,7 @@ func (c *ProjectsMoveDiskCall) RequestId(requestId string) *ProjectsMoveDiskCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *ProjectsMoveDiskCall) Fields(s ...googleapi.Field) *ProjectsMoveDiskCall { +func (c *PublicDelegatedPrefixesDeleteCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -104194,56 +110873,53 @@ func (c *ProjectsMoveDiskCall) Fields(s ...googleapi.Field) *ProjectsMoveDiskCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsMoveDiskCall) Context(ctx context.Context) *ProjectsMoveDiskCall { +func (c *PublicDelegatedPrefixesDeleteCall) Context(ctx context.Context) *PublicDelegatedPrefixesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsMoveDiskCall) Header() http.Header { +func (c *PublicDelegatedPrefixesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { +func (c *PublicDelegatedPrefixesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.diskmoverequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/moveDisk") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "publicDelegatedPrefix": c.publicDelegatedPrefix, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.moveDisk" call. +// Do executes the "compute.publicDelegatedPrefixes.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *PublicDelegatedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -104274,11 +110950,13 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Moves a persistent disk from one zone to another.", - // "httpMethod": "POST", - // "id": "compute.projects.moveDisk", + // "description": "Deletes the specified PublicDelegatedPrefix in the given region.", + // "httpMethod": "DELETE", + // "id": "compute.publicDelegatedPrefixes.delete", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "publicDelegatedPrefix" // ], // "parameters": { // "project": { @@ -104288,16 +110966,27 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err // "required": true, // "type": "string" // }, + // "publicDelegatedPrefix": { + // "description": "Name of the PublicDelegatedPrefix resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region of this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/moveDisk", - // "request": { - // "$ref": "DiskMoveRequest" - // }, + // "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", // "response": { // "$ref": "Operation" // }, @@ -104309,106 +110998,105 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "compute.projects.moveInstance": +// method id "compute.publicDelegatedPrefixes.get": -type ProjectsMoveInstanceCall struct { - s *Service - project string - instancemoverequest *InstanceMoveRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// MoveInstance: Moves an instance and its attached persistent disks -// from one zone to another. -func (r *ProjectsService) MoveInstance(project string, instancemoverequest *InstanceMoveRequest) *ProjectsMoveInstanceCall { - c := &ProjectsMoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.instancemoverequest = instancemoverequest - return c +type PublicDelegatedPrefixesGetCall struct { + s *Service + project string + region string + publicDelegatedPrefix string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. 
Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Get: Returns the specified PublicDelegatedPrefix resource in the +// given region. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsMoveInstanceCall) RequestId(requestId string) *ProjectsMoveInstanceCall { - c.urlParams_.Set("requestId", requestId) +// - project: Project ID for this request. +// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource +// to return. +// - region: Name of the region of this request. +func (r *PublicDelegatedPrefixesService) Get(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesGetCall { + c := &PublicDelegatedPrefixesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.publicDelegatedPrefix = publicDelegatedPrefix return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsMoveInstanceCall) Fields(s ...googleapi.Field) *ProjectsMoveInstanceCall { +func (c *PublicDelegatedPrefixesGetCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *PublicDelegatedPrefixesGetCall) IfNoneMatch(entityTag string) *PublicDelegatedPrefixesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsMoveInstanceCall) Context(ctx context.Context) *ProjectsMoveInstanceCall { +func (c *PublicDelegatedPrefixesGetCall) Context(ctx context.Context) *PublicDelegatedPrefixesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsMoveInstanceCall) Header() http.Header { +func (c *PublicDelegatedPrefixesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) { +func (c *PublicDelegatedPrefixesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancemoverequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/moveInstance") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "publicDelegatedPrefix": c.publicDelegatedPrefix, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.moveInstance" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.publicDelegatedPrefixes.get" call. +// Exactly one of *PublicDelegatedPrefix or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *PublicDelegatedPrefix.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *PublicDelegatedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefix, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -104427,7 +111115,7 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &PublicDelegatedPrefix{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -104439,11 +111127,13 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Moves an instance and its attached persistent disks from one zone to another.", - // "httpMethod": "POST", - // "id": "compute.projects.moveInstance", + // "description": "Returns the specified PublicDelegatedPrefix resource in the given region.", + // "httpMethod": "GET", + // "id": "compute.publicDelegatedPrefixes.get", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "publicDelegatedPrefix" // ], // "parameters": { // "project": { @@ -104453,45 +111143,57 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "publicDelegatedPrefix": { + // "description": "Name of the PublicDelegatedPrefix resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region of this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/moveInstance", - // "request": { - // "$ref": "InstanceMoveRequest" - // }, + // "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", // "response": { - // "$ref": "Operation" + // "$ref": "PublicDelegatedPrefix" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.projects.setCommonInstanceMetadata": +// method id "compute.publicDelegatedPrefixes.insert": -type ProjectsSetCommonInstanceMetadataCall struct { - s *Service - project string - metadata *Metadata - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type PublicDelegatedPrefixesInsertCall struct { + s *Service + project string + region string + publicdelegatedprefix *PublicDelegatedPrefix + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetCommonInstanceMetadata: Sets metadata common to all instances -// within the specified project using the 
data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setCommonInstanceMetadata -func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Metadata) *ProjectsSetCommonInstanceMetadataCall { - c := &ProjectsSetCommonInstanceMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a PublicDelegatedPrefix in the specified project in +// the given region using the parameters that are included in the +// request. +// +// - project: Project ID for this request. +// - region: Name of the region of this request. +func (r *PublicDelegatedPrefixesService) Insert(project string, region string, publicdelegatedprefix *PublicDelegatedPrefix) *PublicDelegatedPrefixesInsertCall { + c := &PublicDelegatedPrefixesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.metadata = metadata + c.region = region + c.publicdelegatedprefix = publicdelegatedprefix return c } @@ -104509,7 +111211,7 @@ func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Me // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsSetCommonInstanceMetadataCall) RequestId(requestId string) *ProjectsSetCommonInstanceMetadataCall { +func (c *PublicDelegatedPrefixesInsertCall) RequestId(requestId string) *PublicDelegatedPrefixesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -104517,7 +111219,7 @@ func (c *ProjectsSetCommonInstanceMetadataCall) RequestId(requestId string) *Pro // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *ProjectsSetCommonInstanceMetadataCall { +func (c *PublicDelegatedPrefixesInsertCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -104525,36 +111227,36 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *Pr // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsSetCommonInstanceMetadataCall) Context(ctx context.Context) *ProjectsSetCommonInstanceMetadataCall { +func (c *PublicDelegatedPrefixesInsertCall) Context(ctx context.Context) *PublicDelegatedPrefixesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsSetCommonInstanceMetadataCall) Header() http.Header { +func (c *PublicDelegatedPrefixesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) { +func (c *PublicDelegatedPrefixesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicdelegatedprefix) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setCommonInstanceMetadata") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -104563,18 +111265,19 @@ func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Res req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.setCommonInstanceMetadata" call. +// Do executes the "compute.publicDelegatedPrefixes.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *PublicDelegatedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -104605,11 +111308,12 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets metadata common to all instances within the specified project using the data included in the request.", + // "description": "Creates a PublicDelegatedPrefix in the specified project in the given region using the parameters that are included in the request.", // "httpMethod": "POST", - // "id": "compute.projects.setCommonInstanceMetadata", + // "id": "compute.publicDelegatedPrefixes.insert", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "project": { @@ -104619,15 +111323,22 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region of this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/setCommonInstanceMetadata", + // "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes", // "request": { - // "$ref": "Metadata" + // "$ref": "PublicDelegatedPrefix" // }, // "response": { // "$ref": "Operation" @@ -104640,108 +111351,174 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) } -// method id "compute.projects.setDefaultNetworkTier": +// method id "compute.publicDelegatedPrefixes.list": -type ProjectsSetDefaultNetworkTierCall struct { - s *Service - project string - projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type PublicDelegatedPrefixesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetDefaultNetworkTier: Sets the default network tier of the project. -// The default network tier is used when an -// address/forwardingRule/instance is created without specifying the -// network tier field. -func (r *ProjectsService) SetDefaultNetworkTier(project string, projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest) *ProjectsSetDefaultNetworkTierCall { - c := &ProjectsSetDefaultNetworkTierCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Lists the PublicDelegatedPrefixes for a project in the given +// region. +// +// - project: Project ID for this request. +// - region: Name of the region of this request. 
+func (r *PublicDelegatedPrefixesService) List(project string, region string) *PublicDelegatedPrefixesListCall { + c := &PublicDelegatedPrefixesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.projectssetdefaultnetworktierrequest = projectssetdefaultnetworktierrequest + c.region = region return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsSetDefaultNetworkTierCall) RequestId(requestId string) *ProjectsSetDefaultNetworkTierCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *PublicDelegatedPrefixesListCall) Filter(filter string) *PublicDelegatedPrefixesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *PublicDelegatedPrefixesListCall) MaxResults(maxResults int64) *PublicDelegatedPrefixesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". 
This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *PublicDelegatedPrefixesListCall) OrderBy(orderBy string) *PublicDelegatedPrefixesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *PublicDelegatedPrefixesListCall) PageToken(pageToken string) *PublicDelegatedPrefixesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *PublicDelegatedPrefixesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PublicDelegatedPrefixesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsSetDefaultNetworkTierCall) Fields(s ...googleapi.Field) *ProjectsSetDefaultNetworkTierCall { +func (c *PublicDelegatedPrefixesListCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *PublicDelegatedPrefixesListCall) IfNoneMatch(entityTag string) *PublicDelegatedPrefixesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsSetDefaultNetworkTierCall) Context(ctx context.Context) *ProjectsSetDefaultNetworkTierCall { +func (c *PublicDelegatedPrefixesListCall) Context(ctx context.Context) *PublicDelegatedPrefixesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsSetDefaultNetworkTierCall) Header() http.Header { +func (c *PublicDelegatedPrefixesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsSetDefaultNetworkTierCall) doRequest(alt string) (*http.Response, error) { +func (c *PublicDelegatedPrefixesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectssetdefaultnetworktierrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setDefaultNetworkTier") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.setDefaultNetworkTier" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.publicDelegatedPrefixes.list" call. +// Exactly one of *PublicDelegatedPrefixList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *PublicDelegatedPrefixList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *PublicDelegatedPrefixesListCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefixList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -104760,7 +111537,7 @@ func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*O if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &PublicDelegatedPrefixList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -104772,13 +111549,37 @@ func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Sets the default network tier of the project. 
The default network tier is used when an address/forwardingRule/instance is created without specifying the network tier field.", - // "httpMethod": "POST", - // "id": "compute.projects.setDefaultNetworkTier", + // "description": "Lists the PublicDelegatedPrefixes for a project in the given region.", + // "httpMethod": "GET", + // "id": "compute.publicDelegatedPrefixes.list", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -104786,47 +111587,80 @@ func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*O // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "region": { + // "description": "Name of the region of this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "projects/{project}/setDefaultNetworkTier", - // "request": { - // "$ref": "ProjectsSetDefaultNetworkTierRequest" - // }, + // "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes", // "response": { - // "$ref": "Operation" + // "$ref": "PublicDelegatedPrefixList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.projects.setUsageExportBucket": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *PublicDelegatedPrefixesListCall) Pages(ctx context.Context, f func(*PublicDelegatedPrefixList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ProjectsSetUsageExportBucketCall struct { - s *Service - project string - usageexportlocation *UsageExportLocation - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.publicDelegatedPrefixes.patch": + +type PublicDelegatedPrefixesPatchCall struct { + s *Service + project string + region string + publicDelegatedPrefix string + publicdelegatedprefix *PublicDelegatedPrefix + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetUsageExportBucket: Enables the usage export feature and sets the -// usage export bucket where reports are stored. If you provide an empty -// request body using this method, the usage export feature will be -// disabled. -// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setUsageExportBucket -func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocation *UsageExportLocation) *ProjectsSetUsageExportBucketCall { - c := &ProjectsSetUsageExportBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Patches the specified PublicDelegatedPrefix resource with the +// data included in the request. This method supports PATCH semantics +// and uses JSON merge patch format and processing rules. 
+// +// - project: Project ID for this request. +// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource +// to patch. +// - region: Name of the region for this request. +func (r *PublicDelegatedPrefixesService) Patch(project string, region string, publicDelegatedPrefix string, publicdelegatedprefix *PublicDelegatedPrefix) *PublicDelegatedPrefixesPatchCall { + c := &PublicDelegatedPrefixesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.usageexportlocation = usageexportlocation + c.region = region + c.publicDelegatedPrefix = publicDelegatedPrefix + c.publicdelegatedprefix = publicdelegatedprefix return c } @@ -104844,7 +111678,7 @@ func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocati // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsSetUsageExportBucketCall) RequestId(requestId string) *ProjectsSetUsageExportBucketCall { +func (c *PublicDelegatedPrefixesPatchCall) RequestId(requestId string) *PublicDelegatedPrefixesPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -104852,7 +111686,7 @@ func (c *ProjectsSetUsageExportBucketCall) RequestId(requestId string) *Projects // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *ProjectsSetUsageExportBucketCall { +func (c *PublicDelegatedPrefixesPatchCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -104860,56 +111694,58 @@ func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *Project // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsSetUsageExportBucketCall) Context(ctx context.Context) *ProjectsSetUsageExportBucketCall { +func (c *PublicDelegatedPrefixesPatchCall) Context(ctx context.Context) *PublicDelegatedPrefixesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsSetUsageExportBucketCall) Header() http.Header { +func (c *PublicDelegatedPrefixesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) { +func (c *PublicDelegatedPrefixesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.usageexportlocation) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicdelegatedprefix) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setUsageExportBucket") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "publicDelegatedPrefix": c.publicDelegatedPrefix, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.setUsageExportBucket" call. +// Do executes the "compute.publicDelegatedPrefixes.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *PublicDelegatedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -104940,11 +111776,13 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled.", - // "httpMethod": "POST", - // "id": "compute.projects.setUsageExportBucket", + // "description": "Patches the specified PublicDelegatedPrefix resource with the data included in the request. 
This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.publicDelegatedPrefixes.patch", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "publicDelegatedPrefix" // ], // "parameters": { // "project": { @@ -104954,25 +111792,36 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op // "required": true, // "type": "string" // }, + // "publicDelegatedPrefix": { + // "description": "Name of the PublicDelegatedPrefix resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/setUsageExportBucket", + // "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", // "request": { - // "$ref": "UsageExportLocation" + // "$ref": "PublicDelegatedPrefix" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/compute" // ] // } @@ -104991,6 +111840,10 @@ type RegionAutoscalersDeleteCall struct { } // Delete: Deletes the specified autoscaler. +// +// - autoscaler: Name of the autoscaler to delete. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionAutoscalersService) Delete(project string, region string, autoscaler string) *RegionAutoscalersDeleteCall { c := &RegionAutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -105045,7 +111898,7 @@ func (c *RegionAutoscalersDeleteCall) Header() http.Header { func (c *RegionAutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105168,6 +112021,10 @@ type RegionAutoscalersGetCall struct { } // Get: Returns the specified autoscaler. +// +// - autoscaler: Name of the autoscaler to return. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
func (r *RegionAutoscalersService) Get(project string, region string, autoscaler string) *RegionAutoscalersGetCall { c := &RegionAutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -105213,7 +112070,7 @@ func (c *RegionAutoscalersGetCall) Header() http.Header { func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105335,6 +112192,9 @@ type RegionAutoscalersInsertCall struct { // Insert: Creates an autoscaler in the specified project using the data // included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionAutoscalersService) Insert(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersInsertCall { c := &RegionAutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -105389,7 +112249,7 @@ func (c *RegionAutoscalersInsertCall) Header() http.Header { func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105511,6 +112371,9 @@ type RegionAutoscalersListCall struct { // List: Retrieves a list of autoscalers contained within the specified // region. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionAutoscalersService) List(project string, region string) *RegionAutoscalersListCall { c := &RegionAutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -105586,7 +112449,7 @@ func (c *RegionAutoscalersListCall) PageToken(pageToken string) *RegionAutoscale // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *RegionAutoscalersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionAutoscalersListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -105629,7 +112492,7 @@ func (c *RegionAutoscalersListCall) Header() http.Header { func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105738,7 +112601,7 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" // } @@ -105792,6 +112655,9 @@ type RegionAutoscalersPatchCall struct { // Patch: Updates an autoscaler in the specified project using the data // included in the request. This method supports PATCH semantics and // uses the JSON merge patch format and processing rules. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionAutoscalersService) Patch(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersPatchCall { c := &RegionAutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -105853,7 +112719,7 @@ func (c *RegionAutoscalersPatchCall) Header() http.Header { func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105981,6 +112847,9 @@ type RegionAutoscalersUpdateCall struct { // Update: Updates an autoscaler in the specified project using the data // included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionAutoscalersService) Update(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersUpdateCall { c := &RegionAutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -106042,7 +112911,7 @@ func (c *RegionAutoscalersUpdateCall) Header() http.Header { func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106169,6 +113038,10 @@ type RegionBackendServicesDeleteCall struct { } // Delete: Deletes the specified regional BackendService resource. +// +// - backendService: Name of the BackendService resource to delete. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionBackendServicesService) Delete(project string, region string, backendService string) *RegionBackendServicesDeleteCall { c := &RegionBackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -106223,7 +113096,7 @@ func (c *RegionBackendServicesDeleteCall) Header() http.Header { func (c *RegionBackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106346,6 +113219,10 @@ type RegionBackendServicesGetCall struct { } // Get: Returns the specified regional BackendService resource. +// +// - backendService: Name of the BackendService resource to return. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
func (r *RegionBackendServicesService) Get(project string, region string, backendService string) *RegionBackendServicesGetCall { c := &RegionBackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -106391,7 +113268,7 @@ func (c *RegionBackendServicesGetCall) Header() http.Header { func (c *RegionBackendServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106514,6 +113391,11 @@ type RegionBackendServicesGetHealthCall struct { // GetHealth: Gets the most recent health check results for this // regional BackendService. +// +// - backendService: Name of the BackendService resource for which to +// get health. +// - project: . +// - region: Name of the region scoping this request. func (r *RegionBackendServicesService) GetHealth(project string, region string, backendService string, resourcegroupreference *ResourceGroupReference) *RegionBackendServicesGetHealthCall { c := &RegionBackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -106550,7 +113432,7 @@ func (c *RegionBackendServicesGetHealthCall) Header() http.Header { func (c *RegionBackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106677,6 +113559,9 @@ type RegionBackendServicesInsertCall struct { // Insert: Creates a regional BackendService resource in the specified // project using the data included in the request. For more information, // see Backend services overview. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionBackendServicesService) Insert(project string, region string, backendservice *BackendService) *RegionBackendServicesInsertCall { c := &RegionBackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -106731,7 +113616,7 @@ func (c *RegionBackendServicesInsertCall) Header() http.Header { func (c *RegionBackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106853,6 +113738,9 @@ type RegionBackendServicesListCall struct { // List: Retrieves the list of regional BackendService resources // available to the specified project in the given region. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
func (r *RegionBackendServicesService) List(project string, region string) *RegionBackendServicesListCall { c := &RegionBackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -106928,7 +113816,7 @@ func (c *RegionBackendServicesListCall) PageToken(pageToken string) *RegionBacke // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *RegionBackendServicesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionBackendServicesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -106971,7 +113859,7 @@ func (c *RegionBackendServicesListCall) Header() http.Header { func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107080,7 +113968,7 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -107136,6 +114024,10 @@ type RegionBackendServicesPatchCall struct { // the data included in the request. For more information, see // Understanding backend services This method supports PATCH semantics // and uses the JSON merge patch format and processing rules. +// +// - backendService: Name of the BackendService resource to patch. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionBackendServicesService) Patch(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesPatchCall { c := &RegionBackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -107191,7 +114083,7 @@ func (c *RegionBackendServicesPatchCall) Header() http.Header { func (c *RegionBackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107324,6 +114216,10 @@ type RegionBackendServicesUpdateCall struct { // Update: Updates the specified regional BackendService resource with // the data included in the request. For more information, see Backend // services overview. +// +// - backendService: Name of the BackendService resource to update. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
func (r *RegionBackendServicesService) Update(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesUpdateCall { c := &RegionBackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -107379,7 +114275,7 @@ func (c *RegionBackendServicesUpdateCall) Header() http.Header { func (c *RegionBackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107508,6 +114404,8 @@ type RegionCommitmentsAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of commitments. +// +// - project: Project ID for this request. func (r *RegionCommitmentsService) AggregatedList(project string) *RegionCommitmentsAggregatedListCall { c := &RegionCommitmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -107595,7 +114493,7 @@ func (c *RegionCommitmentsAggregatedListCall) PageToken(pageToken string) *Regio // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *RegionCommitmentsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionCommitmentsAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -107638,7 +114536,7 @@ func (c *RegionCommitmentsAggregatedListCall) Header() http.Header { func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107743,7 +114641,7 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -107797,6 +114695,10 @@ type RegionCommitmentsGetCall struct { // Get: Returns the specified commitment resource. Gets a list of // available commitments by making a list() request. +// +// - commitment: Name of the commitment to return. +// - project: Project ID for this request. +// - region: Name of the region for this request. 
func (r *RegionCommitmentsService) Get(project string, region string, commitment string) *RegionCommitmentsGetCall { c := &RegionCommitmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -107842,7 +114744,7 @@ func (c *RegionCommitmentsGetCall) Header() http.Header { func (c *RegionCommitmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107964,6 +114866,9 @@ type RegionCommitmentsInsertCall struct { // Insert: Creates a commitment in the specified project using the data // included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. func (r *RegionCommitmentsService) Insert(project string, region string, commitment *Commitment) *RegionCommitmentsInsertCall { c := &RegionCommitmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -108018,7 +114923,7 @@ func (c *RegionCommitmentsInsertCall) Header() http.Header { func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108140,6 +115045,9 @@ type RegionCommitmentsListCall struct { // List: Retrieves a list of commitments contained within the specified // region. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. func (r *RegionCommitmentsService) List(project string, region string) *RegionCommitmentsListCall { c := &RegionCommitmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -108215,7 +115123,7 @@ func (c *RegionCommitmentsListCall) PageToken(pageToken string) *RegionCommitmen // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *RegionCommitmentsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionCommitmentsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -108258,7 +115166,7 @@ func (c *RegionCommitmentsListCall) Header() http.Header { func (c *RegionCommitmentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108367,7 +115275,7 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" // } @@ -108421,6 +115329,10 @@ type RegionDiskTypesGetCall struct { // Get: Returns the specified regional disk type. Gets a list of // available disk types by making a list() request. +// +// - diskType: Name of the disk type to return. +// - project: Project ID for this request. +// - region: The name of the region for this request. func (r *RegionDiskTypesService) Get(project string, region string, diskType string) *RegionDiskTypesGetCall { c := &RegionDiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -108466,7 +115378,7 @@ func (c *RegionDiskTypesGetCall) Header() http.Header { func (c *RegionDiskTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108588,6 +115500,9 @@ type RegionDiskTypesListCall struct { // List: Retrieves a list of regional disk types available to the // specified project. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. func (r *RegionDiskTypesService) List(project string, region string) *RegionDiskTypesListCall { c := &RegionDiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -108663,7 +115578,7 @@ func (c *RegionDiskTypesListCall) PageToken(pageToken string) *RegionDiskTypesLi // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *RegionDiskTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionDiskTypesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -108706,7 +115621,7 @@ func (c *RegionDiskTypesListCall) Header() http.Header { func (c *RegionDiskTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108815,7 +115730,7 @@ func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskT // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -108870,6 +115785,10 @@ type RegionDisksAddResourcePoliciesCall struct { // AddResourcePolicies: Adds existing resource policies to a regional // disk. You can only add one policy which will be applied to this disk // for scheduling snapshot creation. +// +// - disk: The disk name for this request. +// - project: Project ID for this request. +// - region: The name of the region for this request. 
func (r *RegionDisksService) AddResourcePolicies(project string, region string, disk string, regiondisksaddresourcepoliciesrequest *RegionDisksAddResourcePoliciesRequest) *RegionDisksAddResourcePoliciesCall { c := &RegionDisksAddResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -108925,7 +115844,7 @@ func (c *RegionDisksAddResourcePoliciesCall) Header() http.Header { func (c *RegionDisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109056,6 +115975,10 @@ type RegionDisksCreateSnapshotCall struct { } // CreateSnapshot: Creates a snapshot of this regional disk. +// +// - disk: Name of the regional persistent disk to snapshot. +// - project: Project ID for this request. +// - region: Name of the region for this request. func (r *RegionDisksService) CreateSnapshot(project string, region string, disk string, snapshot *Snapshot) *RegionDisksCreateSnapshotCall { c := &RegionDisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -109111,7 +116034,7 @@ func (c *RegionDisksCreateSnapshotCall) Header() http.Header { func (c *RegionDisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109244,6 +116167,10 @@ type RegionDisksDeleteCall struct { // regional disk removes all the replicas of its data permanently and is // irreversible. However, deleting a disk does not delete any snapshots // previously made from the disk. You must separately delete snapshots. +// +// - disk: Name of the regional persistent disk to delete. +// - project: Project ID for this request. +// - region: Name of the region for this request. func (r *RegionDisksService) Delete(project string, region string, disk string) *RegionDisksDeleteCall { c := &RegionDisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -109298,7 +116225,7 @@ func (c *RegionDisksDeleteCall) Header() http.Header { func (c *RegionDisksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109420,6 +116347,10 @@ type RegionDisksGetCall struct { } // Get: Returns a specified regional persistent disk. +// +// - disk: Name of the regional persistent disk to return. +// - project: Project ID for this request. +// - region: Name of the region for this request. 
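Illustrative sketch only (same assumed client as above): snapshotting a regional disk with the CreateSnapshot call documented above; the snapshot name is a placeholder, and the returned Operation would normally be polled until it reports DONE:

func snapshotRegionDisk(svc *compute.Service, project, region, disk string) error {
	op, err := svc.RegionDisks.CreateSnapshot(project, region, disk, &compute.Snapshot{
		Name: "example-snapshot", // placeholder name
	}).Do()
	if err != nil {
		return err
	}
	fmt.Println("snapshot operation:", op.Name, "status:", op.Status)
	return nil
}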
func (r *RegionDisksService) Get(project string, region string, disk string) *RegionDisksGetCall { c := &RegionDisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -109465,7 +116396,7 @@ func (c *RegionDisksGetCall) Header() http.Header { func (c *RegionDisksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109588,6 +116519,10 @@ type RegionDisksGetIamPolicyCall struct { // GetIamPolicy: Gets the access control policy for a resource. May be // empty if no such policy or resource exists. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. func (r *RegionDisksService) GetIamPolicy(project string, region string, resource string) *RegionDisksGetIamPolicyCall { c := &RegionDisksGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -109640,7 +116575,7 @@ func (c *RegionDisksGetIamPolicyCall) Header() http.Header { func (c *RegionDisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109768,6 +116703,9 @@ type RegionDisksInsertCall struct { // Insert: Creates a persistent regional disk in the specified project // using the data included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. func (r *RegionDisksService) Insert(project string, region string, disk *Disk) *RegionDisksInsertCall { c := &RegionDisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -109829,7 +116767,7 @@ func (c *RegionDisksInsertCall) Header() http.Header { func (c *RegionDisksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109956,6 +116894,9 @@ type RegionDisksListCall struct { // List: Retrieves the list of persistent disks contained within the // specified region. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. func (r *RegionDisksService) List(project string, region string) *RegionDisksListCall { c := &RegionDisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -110031,7 +116972,7 @@ func (c *RegionDisksListCall) PageToken(pageToken string) *RegionDisksListCall { // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *RegionDisksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionDisksListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -110074,7 +117015,7 @@ func (c *RegionDisksListCall) Header() http.Header { func (c *RegionDisksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110183,7 +117124,7 @@ func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -110237,6 +117178,10 @@ type RegionDisksRemoveResourcePoliciesCall struct { // RemoveResourcePolicies: Removes resource policies from a regional // disk. +// +// - disk: The disk name for this request. +// - project: Project ID for this request. +// - region: The name of the region for this request. func (r *RegionDisksService) RemoveResourcePolicies(project string, region string, disk string, regiondisksremoveresourcepoliciesrequest *RegionDisksRemoveResourcePoliciesRequest) *RegionDisksRemoveResourcePoliciesCall { c := &RegionDisksRemoveResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -110292,7 +117237,7 @@ func (c *RegionDisksRemoveResourcePoliciesCall) Header() http.Header { func (c *RegionDisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110423,6 +117368,10 @@ type RegionDisksResizeCall struct { } // Resize: Resizes the specified regional persistent disk. +// +// - disk: Name of the regional persistent disk. +// - project: The project ID for this request. +// - region: Name of the region for this request. func (r *RegionDisksService) Resize(project string, region string, disk string, regiondisksresizerequest *RegionDisksResizeRequest) *RegionDisksResizeCall { c := &RegionDisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -110478,7 +117427,7 @@ func (c *RegionDisksResizeCall) Header() http.Header { func (c *RegionDisksResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110610,6 +117559,10 @@ type RegionDisksSetIamPolicyCall struct { // SetIamPolicy: Sets the access control policy on the specified // resource. Replaces any existing policy. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. 
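Illustrative sketch only (same assumed client as above): growing a regional persistent disk via the Resize call documented above; SizeGb is assumed to be the request field name in this generated package:

func resizeRegionDisk(svc *compute.Service, project, region, disk string, sizeGb int64) error {
	op, err := svc.RegionDisks.Resize(project, region, disk, &compute.RegionDisksResizeRequest{
		SizeGb: sizeGb, // assumed field name; new disk size in GB
	}).Do()
	if err != nil {
		return err
	}
	fmt.Println("resize operation:", op.Name)
	return nil
}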
func (r *RegionDisksService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *RegionDisksSetIamPolicyCall { c := &RegionDisksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -110646,7 +117599,7 @@ func (c *RegionDisksSetIamPolicyCall) Header() http.Header { func (c *RegionDisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110772,6 +117725,10 @@ type RegionDisksSetLabelsCall struct { } // SetLabels: Sets the labels on the target regional disk. +// +// - project: Project ID for this request. +// - region: The region for this request. +// - resource: Name or id of the resource for this request. func (r *RegionDisksService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *RegionDisksSetLabelsCall { c := &RegionDisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -110827,7 +117784,7 @@ func (c *RegionDisksSetLabelsCall) Header() http.Header { func (c *RegionDisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110959,6 +117916,10 @@ type RegionDisksTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. func (r *RegionDisksService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionDisksTestIamPermissionsCall { c := &RegionDisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -110995,7 +117956,7 @@ func (c *RegionDisksTestIamPermissionsCall) Header() http.Header { func (c *RegionDisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111121,6 +118082,11 @@ type RegionHealthCheckServicesDeleteCall struct { } // Delete: Deletes the specified regional HealthCheckService. +// +// - healthCheckService: Name of the HealthCheckService to delete. The +// name must be 1-63 characters long, and comply with RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
func (r *RegionHealthCheckServicesService) Delete(project string, region string, healthCheckService string) *RegionHealthCheckServicesDeleteCall { c := &RegionHealthCheckServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -111175,7 +118141,7 @@ func (c *RegionHealthCheckServicesDeleteCall) Header() http.Header { func (c *RegionHealthCheckServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111297,6 +118263,11 @@ type RegionHealthCheckServicesGetCall struct { } // Get: Returns the specified regional HealthCheckService resource. +// +// - healthCheckService: Name of the HealthCheckService to update. The +// name must be 1-63 characters long, and comply with RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionHealthCheckServicesService) Get(project string, region string, healthCheckService string) *RegionHealthCheckServicesGetCall { c := &RegionHealthCheckServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -111342,7 +118313,7 @@ func (c *RegionHealthCheckServicesGetCall) Header() http.Header { func (c *RegionHealthCheckServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111463,6 +118434,9 @@ type RegionHealthCheckServicesInsertCall struct { // Insert: Creates a regional HealthCheckService resource in the // specified project and region using the data included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionHealthCheckServicesService) Insert(project string, region string, healthcheckservice *HealthCheckService) *RegionHealthCheckServicesInsertCall { c := &RegionHealthCheckServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -111517,7 +118491,7 @@ func (c *RegionHealthCheckServicesInsertCall) Header() http.Header { func (c *RegionHealthCheckServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111639,6 +118613,9 @@ type RegionHealthCheckServicesListCall struct { // List: Lists all the HealthCheckService resources that have been // configured for the specified project in the given region. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
func (r *RegionHealthCheckServicesService) List(project string, region string) *RegionHealthCheckServicesListCall { c := &RegionHealthCheckServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -111714,7 +118691,7 @@ func (c *RegionHealthCheckServicesListCall) PageToken(pageToken string) *RegionH // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *RegionHealthCheckServicesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionHealthCheckServicesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -111757,7 +118734,7 @@ func (c *RegionHealthCheckServicesListCall) Header() http.Header { func (c *RegionHealthCheckServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111866,7 +118843,7 @@ func (c *RegionHealthCheckServicesListCall) Do(opts ...googleapi.CallOption) (*H // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -111921,6 +118898,11 @@ type RegionHealthCheckServicesPatchCall struct { // Patch: Updates the specified regional HealthCheckService resource // with the data included in the request. This method supports PATCH // semantics and uses the JSON merge patch format and processing rules. +// +// - healthCheckService: Name of the HealthCheckService to update. The +// name must be 1-63 characters long, and comply with RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionHealthCheckServicesService) Patch(project string, region string, healthCheckService string, healthcheckservice *HealthCheckService) *RegionHealthCheckServicesPatchCall { c := &RegionHealthCheckServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -111976,7 +118958,7 @@ func (c *RegionHealthCheckServicesPatchCall) Header() http.Header { func (c *RegionHealthCheckServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112105,6 +119087,10 @@ type RegionHealthChecksDeleteCall struct { } // Delete: Deletes the specified HealthCheck resource. +// +// - healthCheck: Name of the HealthCheck resource to delete. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
func (r *RegionHealthChecksService) Delete(project string, region string, healthCheck string) *RegionHealthChecksDeleteCall { c := &RegionHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -112159,7 +119145,7 @@ func (c *RegionHealthChecksDeleteCall) Header() http.Header { func (c *RegionHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112283,6 +119269,10 @@ type RegionHealthChecksGetCall struct { // Get: Returns the specified HealthCheck resource. Gets a list of // available health checks by making a list() request. +// +// - healthCheck: Name of the HealthCheck resource to return. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionHealthChecksService) Get(project string, region string, healthCheck string) *RegionHealthChecksGetCall { c := &RegionHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -112328,7 +119318,7 @@ func (c *RegionHealthChecksGetCall) Header() http.Header { func (c *RegionHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112450,6 +119440,9 @@ type RegionHealthChecksInsertCall struct { // Insert: Creates a HealthCheck resource in the specified project using // the data included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionHealthChecksService) Insert(project string, region string, healthcheck *HealthCheck) *RegionHealthChecksInsertCall { c := &RegionHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -112504,7 +119497,7 @@ func (c *RegionHealthChecksInsertCall) Header() http.Header { func (c *RegionHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112626,6 +119619,9 @@ type RegionHealthChecksListCall struct { // List: Retrieves the list of HealthCheck resources available to the // specified project. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionHealthChecksService) List(project string, region string) *RegionHealthChecksListCall { c := &RegionHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -112701,7 +119697,7 @@ func (c *RegionHealthChecksListCall) PageToken(pageToken string) *RegionHealthCh // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
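Illustrative sketch only (same assumed client as above): opting in to partial results when listing regional health checks, per the returnPartialSuccess parameter described above:

func listRegionHealthChecks(svc *compute.Service, project, region string) error {
	page, err := svc.RegionHealthChecks.List(project, region).ReturnPartialSuccess(true).Do()
	if err != nil {
		return err
	}
	for _, hc := range page.Items {
		fmt.Println(hc.Name, hc.Type)
	}
	return nil
}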
func (c *RegionHealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionHealthChecksListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -112744,7 +119740,7 @@ func (c *RegionHealthChecksListCall) Header() http.Header { func (c *RegionHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112853,7 +119849,7 @@ func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCh // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -112908,6 +119904,10 @@ type RegionHealthChecksPatchCall struct { // Patch: Updates a HealthCheck resource in the specified project using // the data included in the request. This method supports PATCH // semantics and uses the JSON merge patch format and processing rules. +// +// - healthCheck: Name of the HealthCheck resource to patch. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionHealthChecksService) Patch(project string, region string, healthCheck string, healthcheck *HealthCheck) *RegionHealthChecksPatchCall { c := &RegionHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -112963,7 +119963,7 @@ func (c *RegionHealthChecksPatchCall) Header() http.Header { func (c *RegionHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113095,6 +120095,10 @@ type RegionHealthChecksUpdateCall struct { // Update: Updates a HealthCheck resource in the specified project using // the data included in the request. +// +// - healthCheck: Name of the HealthCheck resource to update. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionHealthChecksService) Update(project string, region string, healthCheck string, healthcheck *HealthCheck) *RegionHealthChecksUpdateCall { c := &RegionHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -113150,7 +120154,7 @@ func (c *RegionHealthChecksUpdateCall) Header() http.Header { func (c *RegionHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113297,6 +120301,10 @@ type RegionInstanceGroupManagersAbandonInstancesCall struct { // // You can specify a maximum of 1000 instances with this method per // request. +// +// - instanceGroupManager: Name of the managed instance group. 
+// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionInstanceGroupManagersService) AbandonInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest) *RegionInstanceGroupManagersAbandonInstancesCall { c := &RegionInstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -113352,7 +120360,7 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Header() http.Header { func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113482,6 +120490,12 @@ type RegionInstanceGroupManagersApplyUpdatesToInstancesCall struct { // ApplyUpdatesToInstances: Apply updates to selected instances the // managed instance group. +// +// - instanceGroupManager: The name of the managed instance group, +// should conform to RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request, should conform to +// RFC1035. func (r *RegionInstanceGroupManagersService) ApplyUpdatesToInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersapplyupdatesrequest *RegionInstanceGroupManagersApplyUpdatesRequest) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { c := &RegionInstanceGroupManagersApplyUpdatesToInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -113518,7 +120532,7 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Header() http.H func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113647,6 +120661,12 @@ type RegionInstanceGroupManagersCreateInstancesCall struct { // DONE if the createInstances request is successful. The underlying // actions take additional time. You must separately verify the status // of the creating or actions with the listmanagedinstances method. +// +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - region: The name of the region where the managed instance group is +// located. It should conform to RFC1035. 
func (r *RegionInstanceGroupManagersService) CreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagerscreateinstancesrequest *RegionInstanceGroupManagersCreateInstancesRequest) *RegionInstanceGroupManagersCreateInstancesCall { c := &RegionInstanceGroupManagersCreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -113701,7 +120721,7 @@ func (c *RegionInstanceGroupManagersCreateInstancesCall) Header() http.Header { func (c *RegionInstanceGroupManagersCreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113830,6 +120850,10 @@ type RegionInstanceGroupManagersDeleteCall struct { // Delete: Deletes the specified managed instance group and all of the // instances in that group. +// +// - instanceGroupManager: Name of the managed instance group to delete. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionInstanceGroupManagersService) Delete(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersDeleteCall { c := &RegionInstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -113884,7 +120908,7 @@ func (c *RegionInstanceGroupManagersDeleteCall) Header() http.Header { func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114021,6 +121045,10 @@ type RegionInstanceGroupManagersDeleteInstancesCall struct { // // You can specify a maximum of 1000 instances with this method per // request. +// +// - instanceGroupManager: Name of the managed instance group. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionInstanceGroupManagersService) DeleteInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest) *RegionInstanceGroupManagersDeleteInstancesCall { c := &RegionInstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -114076,7 +121104,7 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Header() http.Header { func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114206,6 +121234,12 @@ type RegionInstanceGroupManagersDeletePerInstanceConfigsCall struct { // DeletePerInstanceConfigs: Deletes selected per-instance configs for // the managed instance group. +// +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. 
+// - region: Name of the region scoping this request, should conform to +// RFC1035. func (r *RegionInstanceGroupManagersService) DeletePerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerdeleteinstanceconfigreq *RegionInstanceGroupManagerDeleteInstanceConfigReq) *RegionInstanceGroupManagersDeletePerInstanceConfigsCall { c := &RegionInstanceGroupManagersDeletePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -114242,7 +121276,7 @@ func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Header() http. func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114367,6 +121401,10 @@ type RegionInstanceGroupManagersGetCall struct { // Get: Returns all of the details about the specified managed instance // group. +// +// - instanceGroupManager: Name of the managed instance group to return. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionInstanceGroupManagersService) Get(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersGetCall { c := &RegionInstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -114412,7 +121450,7 @@ func (c *RegionInstanceGroupManagersGetCall) Header() http.Header { func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114539,6 +121577,9 @@ type RegionInstanceGroupManagersInsertCall struct { // listmanagedinstances method. // // A regional managed instance group can contain up to 2000 instances. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionInstanceGroupManagersService) Insert(project string, region string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersInsertCall { c := &RegionInstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -114593,7 +121634,7 @@ func (c *RegionInstanceGroupManagersInsertCall) Header() http.Header { func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114714,6 +121755,9 @@ type RegionInstanceGroupManagersListCall struct { // List: Retrieves the list of managed instance groups that are // contained within the specified region. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
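Illustrative sketch only (same assumed client as above): reading a regional managed instance group's current state with the Get call documented above:

func describeRegionMIG(svc *compute.Service, project, region, mig string) error {
	igm, err := svc.RegionInstanceGroupManagers.Get(project, region, mig).Do()
	if err != nil {
		return err
	}
	fmt.Println(igm.Name, "target size:", igm.TargetSize, "template:", igm.InstanceTemplate)
	return nil
}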
func (r *RegionInstanceGroupManagersService) List(project string, region string) *RegionInstanceGroupManagersListCall { c := &RegionInstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -114789,7 +121833,7 @@ func (c *RegionInstanceGroupManagersListCall) PageToken(pageToken string) *Regio // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *RegionInstanceGroupManagersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupManagersListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -114832,7 +121876,7 @@ func (c *RegionInstanceGroupManagersListCall) Header() http.Header { func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114940,7 +121984,7 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -114995,6 +122039,14 @@ type RegionInstanceGroupManagersListErrorsCall struct { // ListErrors: Lists all errors thrown by actions on instances for a // given regional managed instance group. The filter and orderBy query // parameters are not supported. +// +// - instanceGroupManager: The name of the managed instance group. It +// must be a string that meets the requirements in RFC1035, or an +// unsigned long integer: must match regexp pattern: (?:a-z +// (?:[-a-z0-9]{0,61}[a-z0-9])?)|[1-9][0-9]{0,19}. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. This should +// conform to RFC1035. func (r *RegionInstanceGroupManagersService) ListErrors(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListErrorsCall { c := &RegionInstanceGroupManagersListErrorsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -115071,7 +122123,7 @@ func (c *RegionInstanceGroupManagersListErrorsCall) PageToken(pageToken string) // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *RegionInstanceGroupManagersListErrorsCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupManagersListErrorsCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -115114,7 +122166,7 @@ func (c *RegionInstanceGroupManagersListErrorsCall) Header() http.Header { func (c *RegionInstanceGroupManagersListErrorsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115232,7 +122284,7 @@ func (c *RegionInstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOpt // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -115287,6 +122339,10 @@ type RegionInstanceGroupManagersListManagedInstancesCall struct { // group and instances that are scheduled to be created. The list // includes any current actions that the group has scheduled for its // instances. The orderBy query parameter is not supported. +// +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionInstanceGroupManagersService) ListManagedInstances(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListManagedInstancesCall { c := &RegionInstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -115363,7 +122419,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) PageToken(pageToke // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *RegionInstanceGroupManagersListManagedInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupManagersListManagedInstancesCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -115396,7 +122452,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Header() http.Head func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115511,7 +122567,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" // } @@ -115565,6 +122621,12 @@ type RegionInstanceGroupManagersListPerInstanceConfigsCall struct { // ListPerInstanceConfigs: Lists all of the per-instance configs defined // for the managed instance group. The orderBy query parameter is not // supported. +// +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request, should conform to +// RFC1035. func (r *RegionInstanceGroupManagersService) ListPerInstanceConfigs(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { c := &RegionInstanceGroupManagersListPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -115641,7 +122703,7 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) PageToken(pageTo // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -115674,7 +122736,7 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Header() http.He func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115789,7 +122851,7 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googl // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -115848,6 +122910,10 @@ type RegionInstanceGroupManagersPatchCall struct { // the individual instances with the listmanagedinstances method. This // method supports PATCH semantics and uses the JSON merge patch format // and processing rules. +// +// - instanceGroupManager: The name of the instance group manager. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
func (r *RegionInstanceGroupManagersService) Patch(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersPatchCall { c := &RegionInstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -115903,7 +122969,7 @@ func (c *RegionInstanceGroupManagersPatchCall) Header() http.Header { func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116034,6 +123100,12 @@ type RegionInstanceGroupManagersPatchPerInstanceConfigsCall struct { // PatchPerInstanceConfigs: Inserts or patches per-instance configs for // the managed instance group. perInstanceConfig.name serves as a key // used to distinguish whether to perform insert or patch. +// +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request, should conform to +// RFC1035. func (r *RegionInstanceGroupManagersService) PatchPerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerpatchinstanceconfigreq *RegionInstanceGroupManagerPatchInstanceConfigReq) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { c := &RegionInstanceGroupManagersPatchPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -116089,7 +123161,7 @@ func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Header() http.H func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116217,13 +123289,13 @@ type RegionInstanceGroupManagersRecreateInstancesCall struct { header_ http.Header } -// RecreateInstances: Flags the specified instances in the managed -// instance group to be immediately recreated. The instances are deleted -// and recreated using the current instance template for the managed -// instance group. This operation is marked as DONE when the flag is set -// even if the instances have not yet been recreated. You must -// separately verify the status of the recreating action with the -// listmanagedinstances method. +// RecreateInstances: Flags the specified VM instances in the managed +// instance group to be immediately recreated. Each instance is +// recreated using the group's current configuration. This operation is +// marked as DONE when the flag is set even if the instances have not +// yet been recreated. You must separately verify the status of each +// instance by checking its currentAction field; for more information, +// see Checking the status of managed instances. // // If the group is part of a backend service that has enabled connection // draining, it can take up to 60 seconds after the connection draining @@ -116232,6 +123304,10 @@ type RegionInstanceGroupManagersRecreateInstancesCall struct { // // You can specify a maximum of 1000 instances with this method per // request. 
+// +// - instanceGroupManager: Name of the managed instance group. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest) *RegionInstanceGroupManagersRecreateInstancesCall { c := &RegionInstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -116287,7 +123363,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Header() http.Header func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116353,7 +123429,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. } return ret, nil // { - // "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "description": "Flags the specified VM instances in the managed instance group to be immediately recreated. Each instance is recreated using the group's current configuration. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of each instance by checking its currentAction field; for more information, see Checking the status of managed instances.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", // "httpMethod": "POST", // "id": "compute.regionInstanceGroupManagers.recreateInstances", // "parameterOrder": [ @@ -116427,6 +123503,12 @@ type RegionInstanceGroupManagersResizeCall struct { // If the group is part of a backend service that has enabled connection // draining, it can take up to 60 seconds after the connection draining // duration has elapsed before the VM instance is removed or deleted. +// +// - instanceGroupManager: Name of the managed instance group. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - size: Number of instances that should exist in this instance group +// manager. 
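The RecreateInstances hunk above rewords the method description: each instance is recreated from the group's current configuration, and completion must be verified per instance via its currentAction field. A minimal, hypothetical sketch of a caller (not part of the vendored file), assuming the compute/v1 import path and that RegionInstanceGroupManagersRecreateRequest carries a list of instance URLs in an Instances field (an assumption; only the type name appears in this hunk):

```go
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatalf("compute.NewService: %v", err)
	}

	// Hypothetical identifiers, for illustration only.
	project, region, mig := "my-project", "us-central1", "my-regional-mig"

	// Instances (a list of instance URLs, at most 1000 per call) is assumed
	// to be the request's field name; the doc comment only names the type.
	req := &compute.RegionInstanceGroupManagersRecreateRequest{
		Instances: []string{"zones/us-central1-a/instances/my-instance-0"},
	}

	// The returned operation is DONE as soon as the flag is set; each
	// instance's currentAction must be checked separately, per the doc above.
	op, err := svc.RegionInstanceGroupManagers.
		RecreateInstances(project, region, mig, req).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatalf("RecreateInstances: %v", err)
	}
	log.Printf("operation: %s", op.Name)
}
```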
func (r *RegionInstanceGroupManagersService) Resize(project string, region string, instanceGroupManager string, size int64) *RegionInstanceGroupManagersResizeCall { c := &RegionInstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -116482,7 +123564,7 @@ func (c *RegionInstanceGroupManagersResizeCall) Header() http.Header { func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116614,6 +123696,10 @@ type RegionInstanceGroupManagersSetInstanceTemplateCall struct { // SetInstanceTemplate: Sets the instance template to use when creating // new instances or recreating instances in this group. Existing // instances are not affected. +// +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionInstanceGroupManagersService) SetInstanceTemplate(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest) *RegionInstanceGroupManagersSetInstanceTemplateCall { c := &RegionInstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -116669,7 +123755,7 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Header() http.Heade func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116800,6 +123886,10 @@ type RegionInstanceGroupManagersSetTargetPoolsCall struct { // SetTargetPools: Modifies the target pools to which all new instances // in this group are assigned. Existing instances in the group are not // affected. +// +// - instanceGroupManager: Name of the managed instance group. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionInstanceGroupManagersService) SetTargetPools(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest) *RegionInstanceGroupManagersSetTargetPoolsCall { c := &RegionInstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -116855,7 +123945,7 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Header() http.Header { func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116986,6 +124076,12 @@ type RegionInstanceGroupManagersUpdatePerInstanceConfigsCall struct { // UpdatePerInstanceConfigs: Inserts or updates per-instance configs for // the managed instance group. 
perInstanceConfig.name serves as a key // used to distinguish whether to perform insert or patch. +// +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request, should conform to +// RFC1035. func (r *RegionInstanceGroupManagersService) UpdatePerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerupdateinstanceconfigreq *RegionInstanceGroupManagerUpdateInstanceConfigReq) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { c := &RegionInstanceGroupManagersUpdatePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -117041,7 +124137,7 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Header() http. func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117170,6 +124266,10 @@ type RegionInstanceGroupsGetCall struct { } // Get: Returns the specified instance group resource. +// +// - instanceGroup: Name of the instance group resource to return. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionInstanceGroupsService) Get(project string, region string, instanceGroup string) *RegionInstanceGroupsGetCall { c := &RegionInstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -117215,7 +124315,7 @@ func (c *RegionInstanceGroupsGetCall) Header() http.Header { func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117335,6 +124435,9 @@ type RegionInstanceGroupsListCall struct { // List: Retrieves the list of instance group resources contained within // the specified region. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionInstanceGroupsService) List(project string, region string) *RegionInstanceGroupsListCall { c := &RegionInstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -117410,7 +124513,7 @@ func (c *RegionInstanceGroupsListCall) PageToken(pageToken string) *RegionInstan // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *RegionInstanceGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -117453,7 +124556,7 @@ func (c *RegionInstanceGroupsListCall) Header() http.Header { func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117561,7 +124664,7 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -117618,6 +124721,11 @@ type RegionInstanceGroupsListInstancesCall struct { // specified options, this method can list all instances or only the // instances that are running. The orderBy query parameter is not // supported. +// +// - instanceGroup: Name of the regional instance group for which we +// want to list the instances. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionInstanceGroupsService) ListInstances(project string, region string, instanceGroup string, regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest) *RegionInstanceGroupsListInstancesCall { c := &RegionInstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -117695,7 +124803,7 @@ func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *Reg // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *RegionInstanceGroupsListInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -117728,7 +124836,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117847,7 +124955,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" // } @@ -117904,6 +125012,11 @@ type RegionInstanceGroupsSetNamedPortsCall struct { // SetNamedPorts: Sets the named ports for the specified regional // instance group. +// +// - instanceGroup: The name of the regional instance group where the +// named ports are updated. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region string, instanceGroup string, regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest) *RegionInstanceGroupsSetNamedPortsCall { c := &RegionInstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -117959,7 +125072,7 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118074,6 +125187,185 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) } +// method id "compute.regionInstances.bulkInsert": + +type RegionInstancesBulkInsertCall struct { + s *Service + project string + region string + bulkinsertinstanceresource *BulkInsertInstanceResource + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// BulkInsert: Creates multiple instances in a given region. Count +// specifies the number of instances to create. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +func (r *RegionInstancesService) BulkInsert(project string, region string, bulkinsertinstanceresource *BulkInsertInstanceResource) *RegionInstancesBulkInsertCall { + c := &RegionInstancesBulkInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.bulkinsertinstanceresource = bulkinsertinstanceresource + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstancesBulkInsertCall) RequestId(requestId string) *RegionInstancesBulkInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *RegionInstancesBulkInsertCall) Fields(s ...googleapi.Field) *RegionInstancesBulkInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstancesBulkInsertCall) Context(ctx context.Context) *RegionInstancesBulkInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstancesBulkInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstancesBulkInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bulkinsertinstanceresource) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instances/bulkInsert") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstances.bulkInsert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates multiple instances in a given region. 
Count specifies the number of instances to create.", + // "httpMethod": "POST", + // "id": "compute.regionInstances.bulkInsert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/instances/bulkInsert", + // "request": { + // "$ref": "BulkInsertInstanceResource" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.regionNetworkEndpointGroups.delete": type RegionNetworkEndpointGroupsDeleteCall struct { @@ -118089,6 +125381,12 @@ type RegionNetworkEndpointGroupsDeleteCall struct { // Delete: Deletes the specified network endpoint group. Note that the // NEG cannot be deleted if it is configured as a backend of a backend // service. +// +// - networkEndpointGroup: The name of the network endpoint group to +// delete. It should comply with RFC1035. +// - project: Project ID for this request. +// - region: The name of the region where the network endpoint group is +// located. It should comply with RFC1035. func (r *RegionNetworkEndpointGroupsService) Delete(project string, region string, networkEndpointGroup string) *RegionNetworkEndpointGroupsDeleteCall { c := &RegionNetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -118143,7 +125441,7 @@ func (c *RegionNetworkEndpointGroupsDeleteCall) Header() http.Header { func (c *RegionNetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118265,6 +125563,12 @@ type RegionNetworkEndpointGroupsGetCall struct { // Get: Returns the specified network endpoint group. Gets a list of // available network endpoint groups by making a list() request. +// +// - networkEndpointGroup: The name of the network endpoint group. It +// should comply with RFC1035. +// - project: Project ID for this request. 
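The block above introduces the new compute.regionInstances.bulkInsert method. A minimal, hypothetical usage sketch (not part of the vendored file), built only from what the hunk shows: BulkInsert(project, region, *BulkInsertInstanceResource), the optional RequestId for idempotent retries, and a Do that returns an *Operation. The Count field follows the documented "Count specifies the number of instances to create"; the remaining resource fields (instance properties/template and the like) are omitted and would be required for a real request.

```go
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatalf("compute.NewService: %v", err)
	}

	// Hypothetical identifiers, for illustration only.
	project, region := "my-project", "us-central1"

	// Only Count is set here; a real request also needs the instance
	// properties or template fields, which this hunk does not show.
	req := &compute.BulkInsertInstanceResource{Count: 3}

	op, err := svc.RegionInstances.
		BulkInsert(project, region, req).
		// Any valid non-zero UUID; retries with the same ID are deduplicated.
		RequestId("11111111-2222-3333-4444-555555555555").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatalf("BulkInsert: %v", err)
	}
	log.Printf("started operation %s (status %s)", op.Name, op.Status)
}
```

BulkInsert returns a single region-scoped operation, so callers can track the whole batch through regionOperations rather than one operation per instance.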
+// - region: The name of the region where the network endpoint group is +// located. It should comply with RFC1035. func (r *RegionNetworkEndpointGroupsService) Get(project string, region string, networkEndpointGroup string) *RegionNetworkEndpointGroupsGetCall { c := &RegionNetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -118310,7 +125614,7 @@ func (c *RegionNetworkEndpointGroupsGetCall) Header() http.Header { func (c *RegionNetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118430,6 +125734,10 @@ type RegionNetworkEndpointGroupsInsertCall struct { // Insert: Creates a network endpoint group in the specified project // using the parameters that are included in the request. +// +// - project: Project ID for this request. +// - region: The name of the region where you want to create the network +// endpoint group. It should comply with RFC1035. func (r *RegionNetworkEndpointGroupsService) Insert(project string, region string, networkendpointgroup *NetworkEndpointGroup) *RegionNetworkEndpointGroupsInsertCall { c := &RegionNetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -118484,7 +125792,7 @@ func (c *RegionNetworkEndpointGroupsInsertCall) Header() http.Header { func (c *RegionNetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118605,6 +125913,10 @@ type RegionNetworkEndpointGroupsListCall struct { // List: Retrieves the list of regional network endpoint groups // available to the specified project in the given region. +// +// - project: Project ID for this request. +// - region: The name of the region where the network endpoint group is +// located. It should comply with RFC1035. func (r *RegionNetworkEndpointGroupsService) List(project string, region string) *RegionNetworkEndpointGroupsListCall { c := &RegionNetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -118680,7 +125992,7 @@ func (c *RegionNetworkEndpointGroupsListCall) PageToken(pageToken string) *Regio // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *RegionNetworkEndpointGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionNetworkEndpointGroupsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -118723,7 +126035,7 @@ func (c *RegionNetworkEndpointGroupsListCall) Header() http.Header { func (c *RegionNetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118831,7 +126143,7 @@ func (c *RegionNetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -118884,6 +126196,11 @@ type RegionNotificationEndpointsDeleteCall struct { // Delete: Deletes the specified NotificationEndpoint in the given // region +// +// - notificationEndpoint: Name of the NotificationEndpoint resource to +// delete. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionNotificationEndpointsService) Delete(project string, region string, notificationEndpoint string) *RegionNotificationEndpointsDeleteCall { c := &RegionNotificationEndpointsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -118938,7 +126255,7 @@ func (c *RegionNotificationEndpointsDeleteCall) Header() http.Header { func (c *RegionNotificationEndpointsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119062,6 +126379,11 @@ type RegionNotificationEndpointsGetCall struct { // Get: Returns the specified NotificationEndpoint resource in the given // region. +// +// - notificationEndpoint: Name of the NotificationEndpoint resource to +// return. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionNotificationEndpointsService) Get(project string, region string, notificationEndpoint string) *RegionNotificationEndpointsGetCall { c := &RegionNotificationEndpointsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -119107,7 +126429,7 @@ func (c *RegionNotificationEndpointsGetCall) Header() http.Header { func (c *RegionNotificationEndpointsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119229,6 +126551,9 @@ type RegionNotificationEndpointsInsertCall struct { // Insert: Create a NotificationEndpoint in the specified project in the // given region using the parameters that are included in the request. 
+// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionNotificationEndpointsService) Insert(project string, region string, notificationendpoint *NotificationEndpoint) *RegionNotificationEndpointsInsertCall { c := &RegionNotificationEndpointsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -119283,7 +126608,7 @@ func (c *RegionNotificationEndpointsInsertCall) Header() http.Header { func (c *RegionNotificationEndpointsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119405,6 +126730,9 @@ type RegionNotificationEndpointsListCall struct { // List: Lists the NotificationEndpoints for a project in the given // region. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionNotificationEndpointsService) List(project string, region string) *RegionNotificationEndpointsListCall { c := &RegionNotificationEndpointsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -119480,7 +126808,7 @@ func (c *RegionNotificationEndpointsListCall) PageToken(pageToken string) *Regio // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *RegionNotificationEndpointsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionNotificationEndpointsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -119523,7 +126851,7 @@ func (c *RegionNotificationEndpointsListCall) Header() http.Header { func (c *RegionNotificationEndpointsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119632,7 +126960,7 @@ func (c *RegionNotificationEndpointsListCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -119684,6 +127012,10 @@ type RegionOperationsDeleteCall struct { } // Delete: Deletes the specified region-specific Operations resource. +// +// - operation: Name of the Operations resource to delete. +// - project: Project ID for this request. +// - region: Name of the region for this request. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/delete func (r *RegionOperationsService) Delete(project string, region string, operation string) *RegionOperationsDeleteCall { c := &RegionOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -119720,7 +127052,7 @@ func (c *RegionOperationsDeleteCall) Header() http.Header { func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119810,6 +127142,10 @@ type RegionOperationsGetCall struct { } // Get: Retrieves the specified region-specific Operations resource. +// +// - operation: Name of the Operations resource to return. +// - project: Project ID for this request. +// - region: Name of the region for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/get func (r *RegionOperationsService) Get(project string, region string, operation string) *RegionOperationsGetCall { c := &RegionOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -119856,7 +127192,7 @@ func (c *RegionOperationsGetCall) Header() http.Header { func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119978,6 +127314,9 @@ type RegionOperationsListCall struct { // List: Retrieves a list of Operation resources contained within the // specified region. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/list func (r *RegionOperationsService) List(project string, region string) *RegionOperationsListCall { c := &RegionOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -120054,7 +127393,7 @@ func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperations // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *RegionOperationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionOperationsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -120097,7 +127436,7 @@ func (c *RegionOperationsListCall) Header() http.Header { func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120206,7 +127545,7 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -120271,6 +127610,10 @@ type RegionOperationsWaitCall struct { // - If the default deadline is reached, there is no guarantee that the // operation is actually done when the method returns. Be prepared to // retry if the operation is not `DONE`. +// +// - operation: Name of the Operations resource to return. +// - project: Project ID for this request. +// - region: Name of the region for this request. func (r *RegionOperationsService) Wait(project string, region string, operation string) *RegionOperationsWaitCall { c := &RegionOperationsWaitCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -120306,7 +127649,7 @@ func (c *RegionOperationsWaitCall) Header() http.Header { func (c *RegionOperationsWaitCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120424,6 +127767,10 @@ type RegionSslCertificatesDeleteCall struct { } // Delete: Deletes the specified SslCertificate resource in the region. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - sslCertificate: Name of the SslCertificate resource to delete. func (r *RegionSslCertificatesService) Delete(project string, region string, sslCertificate string) *RegionSslCertificatesDeleteCall { c := &RegionSslCertificatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -120478,7 +127825,7 @@ func (c *RegionSslCertificatesDeleteCall) Header() http.Header { func (c *RegionSslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120603,6 +127950,10 @@ type RegionSslCertificatesGetCall struct { // Get: Returns the specified SslCertificate resource in the specified // region. Get a list of available SSL certificates by making a list() // request. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - sslCertificate: Name of the SslCertificate resource to return. 
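The regionOperations.wait documentation above warns that Wait can return at its internal deadline before the operation is actually done, and that callers should retry while the status is not DONE. A small, hypothetical helper illustrating that retry loop (not part of the vendored file), assuming the compute/v1 import path and the usual generated Operation.Status / Operation.Error field shapes:

```go
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

// waitForRegionOperation keeps calling regionOperations.wait until the
// operation reports DONE, because Wait may return at its deadline before
// the operation has finished. Operation.Status and Operation.Error are the
// usual generated field names and are an assumption here.
func waitForRegionOperation(ctx context.Context, svc *compute.Service, project, region, name string) (*compute.Operation, error) {
	for {
		op, err := svc.RegionOperations.Wait(project, region, name).Context(ctx).Do()
		if err != nil {
			return nil, err
		}
		if op.Status != "DONE" {
			continue // deadline reached before completion; retry as advised
		}
		if op.Error != nil && len(op.Error.Errors) > 0 {
			return op, fmt.Errorf("operation %q failed: %s", name, op.Error.Errors[0].Message)
		}
		return op, nil
	}
}
```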
func (r *RegionSslCertificatesService) Get(project string, region string, sslCertificate string) *RegionSslCertificatesGetCall { c := &RegionSslCertificatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -120648,7 +127999,7 @@ func (c *RegionSslCertificatesGetCall) Header() http.Header { func (c *RegionSslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120770,6 +128121,9 @@ type RegionSslCertificatesInsertCall struct { // Insert: Creates a SslCertificate resource in the specified project // and region using the data included in the request +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionSslCertificatesService) Insert(project string, region string, sslcertificate *SslCertificate) *RegionSslCertificatesInsertCall { c := &RegionSslCertificatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -120824,7 +128178,7 @@ func (c *RegionSslCertificatesInsertCall) Header() http.Header { func (c *RegionSslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120946,6 +128300,9 @@ type RegionSslCertificatesListCall struct { // List: Retrieves the list of SslCertificate resources available to the // specified project in the specified region. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionSslCertificatesService) List(project string, region string) *RegionSslCertificatesListCall { c := &RegionSslCertificatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -121021,7 +128378,7 @@ func (c *RegionSslCertificatesListCall) PageToken(pageToken string) *RegionSslCe // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *RegionSslCertificatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionSslCertificatesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -121064,7 +128421,7 @@ func (c *RegionSslCertificatesListCall) Header() http.Header { func (c *RegionSslCertificatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121173,7 +128530,7 @@ func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCe // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -121225,6 +128582,10 @@ type RegionTargetHttpProxiesDeleteCall struct { } // Delete: Deletes the specified TargetHttpProxy resource. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetHttpProxy: Name of the TargetHttpProxy resource to delete. func (r *RegionTargetHttpProxiesService) Delete(project string, region string, targetHttpProxy string) *RegionTargetHttpProxiesDeleteCall { c := &RegionTargetHttpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -121279,7 +128640,7 @@ func (c *RegionTargetHttpProxiesDeleteCall) Header() http.Header { func (c *RegionTargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121404,6 +128765,10 @@ type RegionTargetHttpProxiesGetCall struct { // Get: Returns the specified TargetHttpProxy resource in the specified // region. Gets a list of available target HTTP proxies by making a // list() request. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetHttpProxy: Name of the TargetHttpProxy resource to return. func (r *RegionTargetHttpProxiesService) Get(project string, region string, targetHttpProxy string) *RegionTargetHttpProxiesGetCall { c := &RegionTargetHttpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -121449,7 +128814,7 @@ func (c *RegionTargetHttpProxiesGetCall) Header() http.Header { func (c *RegionTargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121571,6 +128936,9 @@ type RegionTargetHttpProxiesInsertCall struct { // Insert: Creates a TargetHttpProxy resource in the specified project // and region using the data included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionTargetHttpProxiesService) Insert(project string, region string, targethttpproxy *TargetHttpProxy) *RegionTargetHttpProxiesInsertCall { c := &RegionTargetHttpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -121625,7 +128993,7 @@ func (c *RegionTargetHttpProxiesInsertCall) Header() http.Header { func (c *RegionTargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121747,6 +129115,9 @@ type RegionTargetHttpProxiesListCall struct { // List: Retrieves the list of TargetHttpProxy resources available to // the specified project in the specified region. 
+// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionTargetHttpProxiesService) List(project string, region string) *RegionTargetHttpProxiesListCall { c := &RegionTargetHttpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -121822,7 +129193,7 @@ func (c *RegionTargetHttpProxiesListCall) PageToken(pageToken string) *RegionTar // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *RegionTargetHttpProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionTargetHttpProxiesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -121865,7 +129236,7 @@ func (c *RegionTargetHttpProxiesListCall) Header() http.Header { func (c *RegionTargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121974,7 +129345,7 @@ func (c *RegionTargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*Tar // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -122027,6 +129398,10 @@ type RegionTargetHttpProxiesSetUrlMapCall struct { } // SetUrlMap: Changes the URL map for TargetHttpProxy. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetHttpProxy: Name of the TargetHttpProxy to set a URL map for. func (r *RegionTargetHttpProxiesService) SetUrlMap(project string, region string, targetHttpProxy string, urlmapreference *UrlMapReference) *RegionTargetHttpProxiesSetUrlMapCall { c := &RegionTargetHttpProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -122082,7 +129457,7 @@ func (c *RegionTargetHttpProxiesSetUrlMapCall) Header() http.Header { func (c *RegionTargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122212,6 +129587,10 @@ type RegionTargetHttpsProxiesDeleteCall struct { } // Delete: Deletes the specified TargetHttpsProxy resource. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource to delete. 
func (r *RegionTargetHttpsProxiesService) Delete(project string, region string, targetHttpsProxy string) *RegionTargetHttpsProxiesDeleteCall { c := &RegionTargetHttpsProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -122266,7 +129645,7 @@ func (c *RegionTargetHttpsProxiesDeleteCall) Header() http.Header { func (c *RegionTargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122391,6 +129770,10 @@ type RegionTargetHttpsProxiesGetCall struct { // Get: Returns the specified TargetHttpsProxy resource in the specified // region. Gets a list of available target HTTP proxies by making a // list() request. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource to return. func (r *RegionTargetHttpsProxiesService) Get(project string, region string, targetHttpsProxy string) *RegionTargetHttpsProxiesGetCall { c := &RegionTargetHttpsProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -122436,7 +129819,7 @@ func (c *RegionTargetHttpsProxiesGetCall) Header() http.Header { func (c *RegionTargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122558,6 +129941,9 @@ type RegionTargetHttpsProxiesInsertCall struct { // Insert: Creates a TargetHttpsProxy resource in the specified project // and region using the data included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionTargetHttpsProxiesService) Insert(project string, region string, targethttpsproxy *TargetHttpsProxy) *RegionTargetHttpsProxiesInsertCall { c := &RegionTargetHttpsProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -122612,7 +129998,7 @@ func (c *RegionTargetHttpsProxiesInsertCall) Header() http.Header { func (c *RegionTargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122734,6 +130120,9 @@ type RegionTargetHttpsProxiesListCall struct { // List: Retrieves the list of TargetHttpsProxy resources available to // the specified project in the specified region. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
func (r *RegionTargetHttpsProxiesService) List(project string, region string) *RegionTargetHttpsProxiesListCall { c := &RegionTargetHttpsProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -122809,7 +130198,7 @@ func (c *RegionTargetHttpsProxiesListCall) PageToken(pageToken string) *RegionTa // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *RegionTargetHttpsProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionTargetHttpsProxiesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -122852,7 +130241,7 @@ func (c *RegionTargetHttpsProxiesListCall) Header() http.Header { func (c *RegionTargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122961,7 +130350,7 @@ func (c *RegionTargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*Ta // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -123014,6 +130403,11 @@ type RegionTargetHttpsProxiesSetSslCertificatesCall struct { } // SetSslCertificates: Replaces SslCertificates for TargetHttpsProxy. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource to set an +// SslCertificates resource for. func (r *RegionTargetHttpsProxiesService) SetSslCertificates(project string, region string, targetHttpsProxy string, regiontargethttpsproxiessetsslcertificatesrequest *RegionTargetHttpsProxiesSetSslCertificatesRequest) *RegionTargetHttpsProxiesSetSslCertificatesCall { c := &RegionTargetHttpsProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -123069,7 +130463,7 @@ func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Header() http.Header { func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123200,6 +130594,11 @@ type RegionTargetHttpsProxiesSetUrlMapCall struct { } // SetUrlMap: Changes the URL map for TargetHttpsProxy. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy to set a URL map +// for. 
func (r *RegionTargetHttpsProxiesService) SetUrlMap(project string, region string, targetHttpsProxy string, urlmapreference *UrlMapReference) *RegionTargetHttpsProxiesSetUrlMapCall { c := &RegionTargetHttpsProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -123255,7 +130654,7 @@ func (c *RegionTargetHttpsProxiesSetUrlMapCall) Header() http.Header { func (c *RegionTargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123385,6 +130784,10 @@ type RegionUrlMapsDeleteCall struct { } // Delete: Deletes the specified UrlMap resource. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - urlMap: Name of the UrlMap resource to delete. func (r *RegionUrlMapsService) Delete(project string, region string, urlMap string) *RegionUrlMapsDeleteCall { c := &RegionUrlMapsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -123427,7 +130830,7 @@ func (c *RegionUrlMapsDeleteCall) Header() http.Header { func (c *RegionUrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123551,6 +130954,10 @@ type RegionUrlMapsGetCall struct { // Get: Returns the specified UrlMap resource. Gets a list of available // URL maps by making a list() request. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - urlMap: Name of the UrlMap resource to return. func (r *RegionUrlMapsService) Get(project string, region string, urlMap string) *RegionUrlMapsGetCall { c := &RegionUrlMapsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -123596,7 +131003,7 @@ func (c *RegionUrlMapsGetCall) Header() http.Header { func (c *RegionUrlMapsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123718,6 +131125,9 @@ type RegionUrlMapsInsertCall struct { // Insert: Creates a UrlMap resource in the specified project using the // data included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
func (r *RegionUrlMapsService) Insert(project string, region string, urlmap *UrlMap) *RegionUrlMapsInsertCall { c := &RegionUrlMapsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -123760,7 +131170,7 @@ func (c *RegionUrlMapsInsertCall) Header() http.Header { func (c *RegionUrlMapsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123882,6 +131292,9 @@ type RegionUrlMapsListCall struct { // List: Retrieves the list of UrlMap resources available to the // specified project in the specified region. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionUrlMapsService) List(project string, region string) *RegionUrlMapsListCall { c := &RegionUrlMapsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -123957,7 +131370,7 @@ func (c *RegionUrlMapsListCall) PageToken(pageToken string) *RegionUrlMapsListCa // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *RegionUrlMapsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionUrlMapsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -124000,7 +131413,7 @@ func (c *RegionUrlMapsListCall) Header() http.Header { func (c *RegionUrlMapsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124109,7 +131522,7 @@ func (c *RegionUrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, e // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -124164,6 +131577,10 @@ type RegionUrlMapsPatchCall struct { // Patch: Patches the specified UrlMap resource with the data included // in the request. This method supports PATCH semantics and uses JSON // merge patch format and processing rules. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - urlMap: Name of the UrlMap resource to patch. 
func (r *RegionUrlMapsService) Patch(project string, region string, urlMap string, urlmap *UrlMap) *RegionUrlMapsPatchCall { c := &RegionUrlMapsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -124207,7 +131624,7 @@ func (c *RegionUrlMapsPatchCall) Header() http.Header { func (c *RegionUrlMapsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124339,6 +131756,10 @@ type RegionUrlMapsUpdateCall struct { // Update: Updates the specified UrlMap resource with the data included // in the request. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - urlMap: Name of the UrlMap resource to update. func (r *RegionUrlMapsService) Update(project string, region string, urlMap string, urlmap *UrlMap) *RegionUrlMapsUpdateCall { c := &RegionUrlMapsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -124382,7 +131803,7 @@ func (c *RegionUrlMapsUpdateCall) Header() http.Header { func (c *RegionUrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124515,6 +131936,10 @@ type RegionUrlMapsValidateCall struct { // Validate: Runs static validation for the UrlMap. In particular, the // tests of the provided UrlMap will be run. Calling this method does // NOT create the UrlMap. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - urlMap: Name of the UrlMap resource to be validated as. func (r *RegionUrlMapsService) Validate(project string, region string, urlMap string, regionurlmapsvalidaterequest *RegionUrlMapsValidateRequest) *RegionUrlMapsValidateCall { c := &RegionUrlMapsValidateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -124551,7 +131976,7 @@ func (c *RegionUrlMapsValidateCall) Header() http.Header { func (c *RegionUrlMapsValidateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124677,6 +132102,9 @@ type RegionsGetCall struct { // Get: Returns the specified Region resource. Gets a list of available // regions by making a list() request. +// +// - project: Project ID for this request. +// - region: Name of the region resource to return. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/get func (r *RegionsService) Get(project string, region string) *RegionsGetCall { c := &RegionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -124722,7 +132150,7 @@ func (c *RegionsGetCall) Header() http.Header { func (c *RegionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124834,6 +132262,8 @@ type RegionsListCall struct { // List: Retrieves the list of region resources available to the // specified project. +// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/regions/list func (r *RegionsService) List(project string) *RegionsListCall { c := &RegionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -124909,7 +132339,7 @@ func (c *RegionsListCall) PageToken(pageToken string) *RegionsListCall { // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *RegionsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -124952,7 +132382,7 @@ func (c *RegionsListCall) Header() http.Header { func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125052,7 +132482,7 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -125103,6 +132533,8 @@ type ReservationsAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of reservations. +// +// - project: Project ID for this request. func (r *ReservationsService) AggregatedList(project string) *ReservationsAggregatedListCall { c := &ReservationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -125190,7 +132622,7 @@ func (c *ReservationsAggregatedListCall) PageToken(pageToken string) *Reservatio // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *ReservationsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ReservationsAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -125233,7 +132665,7 @@ func (c *ReservationsAggregatedListCall) Header() http.Header { func (c *ReservationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125338,7 +132770,7 @@ func (c *ReservationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Rese // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -125390,6 +132822,10 @@ type ReservationsDeleteCall struct { } // Delete: Deletes the specified reservation. +// +// - project: Project ID for this request. +// - reservation: Name of the reservation to delete. +// - zone: Name of the zone for this request. func (r *ReservationsService) Delete(project string, zone string, reservation string) *ReservationsDeleteCall { c := &ReservationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -125444,7 +132880,7 @@ func (c *ReservationsDeleteCall) Header() http.Header { func (c *ReservationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125567,6 +133003,10 @@ type ReservationsGetCall struct { } // Get: Retrieves information about the specified reservation. +// +// - project: Project ID for this request. +// - reservation: Name of the reservation to retrieve. +// - zone: Name of the zone for this request. func (r *ReservationsService) Get(project string, zone string, reservation string) *ReservationsGetCall { c := &ReservationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -125612,7 +133052,7 @@ func (c *ReservationsGetCall) Header() http.Header { func (c *ReservationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125735,6 +133175,10 @@ type ReservationsGetIamPolicyCall struct { // GetIamPolicy: Gets the access control policy for a resource. May be // empty if no such policy or resource exists. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +// - zone: The name of the zone for this request. 
func (r *ReservationsService) GetIamPolicy(project string, zone string, resource string) *ReservationsGetIamPolicyCall { c := &ReservationsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -125787,7 +133231,7 @@ func (c *ReservationsGetIamPolicyCall) Header() http.Header { func (c *ReservationsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125915,6 +133359,9 @@ type ReservationsInsertCall struct { // Insert: Creates a new reservation. For more information, read // Reserving zonal resources. +// +// - project: Project ID for this request. +// - zone: Name of the zone for this request. func (r *ReservationsService) Insert(project string, zone string, reservation *Reservation) *ReservationsInsertCall { c := &ReservationsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -125969,7 +133416,7 @@ func (c *ReservationsInsertCall) Header() http.Header { func (c *ReservationsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126091,6 +133538,9 @@ type ReservationsListCall struct { // List: A list of all the reservations that have been configured for // the specified project in specified zone. +// +// - project: Project ID for this request. +// - zone: Name of the zone for this request. func (r *ReservationsService) List(project string, zone string) *ReservationsListCall { c := &ReservationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -126166,7 +133616,7 @@ func (c *ReservationsListCall) PageToken(pageToken string) *ReservationsListCall // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *ReservationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ReservationsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -126209,7 +133659,7 @@ func (c *ReservationsListCall) Header() http.Header { func (c *ReservationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126311,7 +133761,7 @@ func (c *ReservationsListCall) Do(opts ...googleapi.CallOption) (*ReservationLis // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" // }, @@ -126373,6 +133823,10 @@ type ReservationsResizeCall struct { // Resize: Resizes the reservation (applicable to standalone // reservations only). For more information, read Modifying // reservations. +// +// - project: Project ID for this request. +// - reservation: Name of the reservation to update. +// - zone: Name of the zone for this request. func (r *ReservationsService) Resize(project string, zone string, reservation string, reservationsresizerequest *ReservationsResizeRequest) *ReservationsResizeCall { c := &ReservationsResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -126428,7 +133882,7 @@ func (c *ReservationsResizeCall) Header() http.Header { func (c *ReservationsResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126560,6 +134014,10 @@ type ReservationsSetIamPolicyCall struct { // SetIamPolicy: Sets the access control policy on the specified // resource. Replaces any existing policy. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +// - zone: The name of the zone for this request. func (r *ReservationsService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *ReservationsSetIamPolicyCall { c := &ReservationsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -126596,7 +134054,7 @@ func (c *ReservationsSetIamPolicyCall) Header() http.Header { func (c *ReservationsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126723,6 +134181,10 @@ type ReservationsTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +// - zone: The name of the zone for this request. func (r *ReservationsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *ReservationsTestIamPermissionsCall { c := &ReservationsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -126759,7 +134221,7 @@ func (c *ReservationsTestIamPermissionsCall) Header() http.Header { func (c *ReservationsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126884,6 +134346,8 @@ type ResourcePoliciesAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of resource policies. +// +// - project: Project ID for this request. 
func (r *ResourcePoliciesService) AggregatedList(project string) *ResourcePoliciesAggregatedListCall { c := &ResourcePoliciesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -126971,7 +134435,7 @@ func (c *ResourcePoliciesAggregatedListCall) PageToken(pageToken string) *Resour // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *ResourcePoliciesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ResourcePoliciesAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -127014,7 +134478,7 @@ func (c *ResourcePoliciesAggregatedListCall) Header() http.Header { func (c *ResourcePoliciesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127119,7 +134583,7 @@ func (c *ResourcePoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -127171,6 +134635,10 @@ type ResourcePoliciesDeleteCall struct { } // Delete: Deletes the specified resource policy. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. +// - resourcePolicy: Name of the resource policy to delete. func (r *ResourcePoliciesService) Delete(project string, region string, resourcePolicy string) *ResourcePoliciesDeleteCall { c := &ResourcePoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -127225,7 +134693,7 @@ func (c *ResourcePoliciesDeleteCall) Header() http.Header { func (c *ResourcePoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127348,6 +134816,10 @@ type ResourcePoliciesGetCall struct { } // Get: Retrieves all information of the specified resource policy. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. +// - resourcePolicy: Name of the resource policy to retrieve. 
func (r *ResourcePoliciesService) Get(project string, region string, resourcePolicy string) *ResourcePoliciesGetCall { c := &ResourcePoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -127393,7 +134865,7 @@ func (c *ResourcePoliciesGetCall) Header() http.Header { func (c *ResourcePoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127516,6 +134988,10 @@ type ResourcePoliciesGetIamPolicyCall struct { // GetIamPolicy: Gets the access control policy for a resource. May be // empty if no such policy or resource exists. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. func (r *ResourcePoliciesService) GetIamPolicy(project string, region string, resource string) *ResourcePoliciesGetIamPolicyCall { c := &ResourcePoliciesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -127568,7 +135044,7 @@ func (c *ResourcePoliciesGetIamPolicyCall) Header() http.Header { func (c *ResourcePoliciesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127695,6 +135171,9 @@ type ResourcePoliciesInsertCall struct { } // Insert: Creates a new resource policy. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. func (r *ResourcePoliciesService) Insert(project string, region string, resourcepolicy *ResourcePolicy) *ResourcePoliciesInsertCall { c := &ResourcePoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -127749,7 +135228,7 @@ func (c *ResourcePoliciesInsertCall) Header() http.Header { func (c *ResourcePoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127871,6 +135350,9 @@ type ResourcePoliciesListCall struct { // List: A list all the resource policies that have been configured for // the specified project in specified region. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. func (r *ResourcePoliciesService) List(project string, region string) *ResourcePoliciesListCall { c := &ResourcePoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -127946,7 +135428,7 @@ func (c *ResourcePoliciesListCall) PageToken(pageToken string) *ResourcePolicies // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *ResourcePoliciesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ResourcePoliciesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -127989,7 +135471,7 @@ func (c *ResourcePoliciesListCall) Header() http.Header { func (c *ResourcePoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128098,7 +135580,7 @@ func (c *ResourcePoliciesListCall) Do(opts ...googleapi.CallOption) (*ResourcePo // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -128152,6 +135634,10 @@ type ResourcePoliciesSetIamPolicyCall struct { // SetIamPolicy: Sets the access control policy on the specified // resource. Replaces any existing policy. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. func (r *ResourcePoliciesService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *ResourcePoliciesSetIamPolicyCall { c := &ResourcePoliciesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -128188,7 +135674,7 @@ func (c *ResourcePoliciesSetIamPolicyCall) Header() http.Header { func (c *ResourcePoliciesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128315,6 +135801,10 @@ type ResourcePoliciesTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. func (r *ResourcePoliciesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *ResourcePoliciesTestIamPermissionsCall { c := &ResourcePoliciesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -128351,7 +135841,7 @@ func (c *ResourcePoliciesTestIamPermissionsCall) Header() http.Header { func (c *ResourcePoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128476,6 +135966,8 @@ type RoutersAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of routers. +// +// - project: Project ID for this request. 
func (r *RoutersService) AggregatedList(project string) *RoutersAggregatedListCall { c := &RoutersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -128563,7 +136055,7 @@ func (c *RoutersAggregatedListCall) PageToken(pageToken string) *RoutersAggregat // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *RoutersAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RoutersAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -128606,7 +136098,7 @@ func (c *RoutersAggregatedListCall) Header() http.Header { func (c *RoutersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128711,7 +136203,7 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -128763,6 +136255,10 @@ type RoutersDeleteCall struct { } // Delete: Deletes the specified Router resource. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. +// - router: Name of the Router resource to delete. func (r *RoutersService) Delete(project string, region string, router string) *RoutersDeleteCall { c := &RoutersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -128817,7 +136313,7 @@ func (c *RoutersDeleteCall) Header() http.Header { func (c *RoutersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128941,6 +136437,10 @@ type RoutersGetCall struct { // Get: Returns the specified Router resource. Gets a list of available // routers by making a list() request. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. +// - router: Name of the Router resource to return. 
func (r *RoutersService) Get(project string, region string, router string) *RoutersGetCall { c := &RoutersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -128986,7 +136486,7 @@ func (c *RoutersGetCall) Header() http.Header { func (c *RoutersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129109,6 +136609,11 @@ type RoutersGetNatMappingInfoCall struct { // GetNatMappingInfo: Retrieves runtime Nat mapping information of VM // endpoints. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. +// - router: Name of the Router resource to query for Nat Mapping +// information of VM endpoints. func (r *RoutersService) GetNatMappingInfo(project string, region string, router string) *RoutersGetNatMappingInfoCall { c := &RoutersGetNatMappingInfoCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -129185,7 +136690,7 @@ func (c *RoutersGetNatMappingInfoCall) PageToken(pageToken string) *RoutersGetNa // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *RoutersGetNatMappingInfoCall) ReturnPartialSuccess(returnPartialSuccess bool) *RoutersGetNatMappingInfoCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -129228,7 +136733,7 @@ func (c *RoutersGetNatMappingInfoCall) Header() http.Header { func (c *RoutersGetNatMappingInfoCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129339,7 +136844,7 @@ func (c *RoutersGetNatMappingInfoCall) Do(opts ...googleapi.CallOption) (*VmEndp // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // }, @@ -129400,6 +136905,10 @@ type RoutersGetRouterStatusCall struct { // GetRouterStatus: Retrieves runtime information of the specified // router. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. +// - router: Name of the Router resource to query. 
func (r *RoutersService) GetRouterStatus(project string, region string, router string) *RoutersGetRouterStatusCall { c := &RoutersGetRouterStatusCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -129445,7 +136954,7 @@ func (c *RoutersGetRouterStatusCall) Header() http.Header { func (c *RoutersGetRouterStatusCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129567,6 +137076,9 @@ type RoutersInsertCall struct { // Insert: Creates a Router resource in the specified project and region // using the data included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. func (r *RoutersService) Insert(project string, region string, router *Router) *RoutersInsertCall { c := &RoutersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -129621,7 +137133,7 @@ func (c *RoutersInsertCall) Header() http.Header { func (c *RoutersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129743,6 +137255,9 @@ type RoutersListCall struct { // List: Retrieves a list of Router resources available to the specified // project. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. func (r *RoutersService) List(project string, region string) *RoutersListCall { c := &RoutersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -129818,7 +137333,7 @@ func (c *RoutersListCall) PageToken(pageToken string) *RoutersListCall { // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *RoutersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RoutersListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -129861,7 +137376,7 @@ func (c *RoutersListCall) Header() http.Header { func (c *RoutersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129970,7 +137485,7 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -130025,6 +137540,10 @@ type RoutersPatchCall struct { // Patch: Patches the specified Router resource with the data included // in the request. 
This method supports PATCH semantics and uses JSON // merge patch format and processing rules. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. +// - router: Name of the Router resource to patch. func (r *RoutersService) Patch(project string, region string, router string, router2 *Router) *RoutersPatchCall { c := &RoutersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -130080,7 +137599,7 @@ func (c *RoutersPatchCall) Header() http.Header { func (c *RoutersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130213,6 +137732,10 @@ type RoutersPreviewCall struct { // Preview: Preview fields auto-generated during router create and // update operations. Calling this method does NOT create or update the // router. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. +// - router: Name of the Router resource to query. func (r *RoutersService) Preview(project string, region string, router string, router2 *Router) *RoutersPreviewCall { c := &RoutersPreviewCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -130249,7 +137772,7 @@ func (c *RoutersPreviewCall) Header() http.Header { func (c *RoutersPreviewCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130380,6 +137903,10 @@ type RoutersUpdateCall struct { // that the state of the target resource be created or replaced with the // state defined by the representation enclosed in the request message // payload. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. +// - router: Name of the Router resource to update. func (r *RoutersService) Update(project string, region string, router string, router2 *Router) *RoutersUpdateCall { c := &RoutersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -130435,7 +137962,7 @@ func (c *RoutersUpdateCall) Header() http.Header { func (c *RoutersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130564,6 +138091,9 @@ type RoutesDeleteCall struct { } // Delete: Deletes the specified Route resource. +// +// - project: Project ID for this request. +// - route: Name of the Route resource to delete. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/delete func (r *RoutesService) Delete(project string, route string) *RoutesDeleteCall { c := &RoutesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -130618,7 +138148,7 @@ func (c *RoutesDeleteCall) Header() http.Header { func (c *RoutesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130732,6 +138262,9 @@ type RoutesGetCall struct { // Get: Returns the specified Route resource. Gets a list of available // routes by making a list() request. +// +// - project: Project ID for this request. +// - route: Name of the Route resource to return. // For details, see https://cloud.google.com/compute/docs/reference/latest/routes/get func (r *RoutesService) Get(project string, route string) *RoutesGetCall { c := &RoutesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -130777,7 +138310,7 @@ func (c *RoutesGetCall) Header() http.Header { func (c *RoutesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130889,6 +138422,8 @@ type RoutesInsertCall struct { // Insert: Creates a Route resource in the specified project using the // data included in the request. +// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/routes/insert func (r *RoutesService) Insert(project string, route *Route) *RoutesInsertCall { c := &RoutesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -130943,7 +138478,7 @@ func (c *RoutesInsertCall) Header() http.Header { func (c *RoutesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131055,6 +138590,8 @@ type RoutesListCall struct { // List: Retrieves the list of Route resources available to the // specified project. +// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/routes/list func (r *RoutesService) List(project string) *RoutesListCall { c := &RoutesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -131130,7 +138667,7 @@ func (c *RoutesListCall) PageToken(pageToken string) *RoutesListCall { // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *RoutesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RoutesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -131173,7 +138710,7 @@ func (c *RoutesListCall) Header() http.Header { func (c *RoutesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131273,7 +138810,7 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -131325,6 +138862,9 @@ type SecurityPoliciesAddRuleCall struct { } // AddRule: Inserts a rule into a security policy. +// +// - project: Project ID for this request. +// - securityPolicy: Name of the security policy to update. func (r *SecurityPoliciesService) AddRule(project string, securityPolicy string, securitypolicyrule *SecurityPolicyRule) *SecurityPoliciesAddRuleCall { c := &SecurityPoliciesAddRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -131360,7 +138900,7 @@ func (c *SecurityPoliciesAddRuleCall) Header() http.Header { func (c *SecurityPoliciesAddRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131475,6 +139015,9 @@ type SecurityPoliciesDeleteCall struct { } // Delete: Deletes the specified policy. +// +// - project: Project ID for this request. +// - securityPolicy: Name of the security policy to delete. func (r *SecurityPoliciesService) Delete(project string, securityPolicy string) *SecurityPoliciesDeleteCall { c := &SecurityPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -131528,7 +139071,7 @@ func (c *SecurityPoliciesDeleteCall) Header() http.Header { func (c *SecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131642,6 +139185,9 @@ type SecurityPoliciesGetCall struct { // Get: List all of the ordered rules present in a single specified // policy. +// +// - project: Project ID for this request. +// - securityPolicy: Name of the security policy to get. 
func (r *SecurityPoliciesService) Get(project string, securityPolicy string) *SecurityPoliciesGetCall { c := &SecurityPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -131686,7 +139232,7 @@ func (c *SecurityPoliciesGetCall) Header() http.Header { func (c *SecurityPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131798,6 +139344,10 @@ type SecurityPoliciesGetRuleCall struct { } // GetRule: Gets a rule at the specified priority. +// +// - project: Project ID for this request. +// - securityPolicy: Name of the security policy to which the queried +// rule belongs. func (r *SecurityPoliciesService) GetRule(project string, securityPolicy string) *SecurityPoliciesGetRuleCall { c := &SecurityPoliciesGetRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -131849,7 +139399,7 @@ func (c *SecurityPoliciesGetRuleCall) Header() http.Header { func (c *SecurityPoliciesGetRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131967,6 +139517,8 @@ type SecurityPoliciesInsertCall struct { // Insert: Creates a new policy in the specified project using the data // included in the request. +// +// - project: Project ID for this request. func (r *SecurityPoliciesService) Insert(project string, securitypolicy *SecurityPolicy) *SecurityPoliciesInsertCall { c := &SecurityPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -132020,7 +139572,7 @@ func (c *SecurityPoliciesInsertCall) Header() http.Header { func (c *SecurityPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -132132,6 +139684,8 @@ type SecurityPoliciesListCall struct { // List: List all the policies that have been configured for the // specified project. +// +// - project: Project ID for this request. func (r *SecurityPoliciesService) List(project string) *SecurityPoliciesListCall { c := &SecurityPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -132206,7 +139760,7 @@ func (c *SecurityPoliciesListCall) PageToken(pageToken string) *SecurityPolicies // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *SecurityPoliciesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SecurityPoliciesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -132249,7 +139803,7 @@ func (c *SecurityPoliciesListCall) Header() http.Header { func (c *SecurityPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -132349,7 +139903,7 @@ func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPo // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -132401,6 +139955,8 @@ type SecurityPoliciesListPreconfiguredExpressionSetsCall struct { // ListPreconfiguredExpressionSets: Gets the current list of // preconfigured Web Application Firewall (WAF) expressions. +// +// - project: Project ID for this request. func (r *SecurityPoliciesService) ListPreconfiguredExpressionSets(project string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { c := &SecurityPoliciesListPreconfiguredExpressionSetsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -132475,7 +140031,7 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) PageToken(pageToke // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) ReturnPartialSuccess(returnPartialSuccess bool) *SecurityPoliciesListPreconfiguredExpressionSetsCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -132518,7 +140074,7 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Header() http.Head func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -132621,7 +140177,7 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Do(opts ...googlea // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -132654,6 +140210,9 @@ type SecurityPoliciesPatchCall struct { // request. This cannot be used to be update the rules in the policy. // Please use the per rule methods like addRule, patchRule, and // removeRule instead. +// +// - project: Project ID for this request. 
+// - securityPolicy: Name of the security policy to update. func (r *SecurityPoliciesService) Patch(project string, securityPolicy string, securitypolicy *SecurityPolicy) *SecurityPoliciesPatchCall { c := &SecurityPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -132708,7 +140267,7 @@ func (c *SecurityPoliciesPatchCall) Header() http.Header { func (c *SecurityPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -132829,6 +140388,9 @@ type SecurityPoliciesPatchRuleCall struct { } // PatchRule: Patches a rule at the specified priority. +// +// - project: Project ID for this request. +// - securityPolicy: Name of the security policy to update. func (r *SecurityPoliciesService) PatchRule(project string, securityPolicy string, securitypolicyrule *SecurityPolicyRule) *SecurityPoliciesPatchRuleCall { c := &SecurityPoliciesPatchRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -132871,7 +140433,7 @@ func (c *SecurityPoliciesPatchRuleCall) Header() http.Header { func (c *SecurityPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -132992,6 +140554,9 @@ type SecurityPoliciesRemoveRuleCall struct { } // RemoveRule: Deletes a rule at the specified priority. +// +// - project: Project ID for this request. +// - securityPolicy: Name of the security policy to update. func (r *SecurityPoliciesService) RemoveRule(project string, securityPolicy string) *SecurityPoliciesRemoveRuleCall { c := &SecurityPoliciesRemoveRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -133033,7 +140598,7 @@ func (c *SecurityPoliciesRemoveRuleCall) Header() http.Header { func (c *SecurityPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -133152,6 +140717,9 @@ type SnapshotsDeleteCall struct { // to the next corresponding snapshot. // // For more information, see Deleting snapshots. +// +// - project: Project ID for this request. +// - snapshot: Name of the Snapshot resource to delete. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/delete func (r *SnapshotsService) Delete(project string, snapshot string) *SnapshotsDeleteCall { c := &SnapshotsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -133206,7 +140774,7 @@ func (c *SnapshotsDeleteCall) Header() http.Header { func (c *SnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -133320,6 +140888,9 @@ type SnapshotsGetCall struct { // Get: Returns the specified Snapshot resource. Gets a list of // available snapshots by making a list() request. +// +// - project: Project ID for this request. +// - snapshot: Name of the Snapshot resource to return. // For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/get func (r *SnapshotsService) Get(project string, snapshot string) *SnapshotsGetCall { c := &SnapshotsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -133365,7 +140936,7 @@ func (c *SnapshotsGetCall) Header() http.Header { func (c *SnapshotsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -133478,6 +141049,9 @@ type SnapshotsGetIamPolicyCall struct { // GetIamPolicy: Gets the access control policy for a resource. May be // empty if no such policy or resource exists. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. func (r *SnapshotsService) GetIamPolicy(project string, resource string) *SnapshotsGetIamPolicyCall { c := &SnapshotsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -133529,7 +141103,7 @@ func (c *SnapshotsGetIamPolicyCall) Header() http.Header { func (c *SnapshotsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -133647,6 +141221,8 @@ type SnapshotsListCall struct { // List: Retrieves the list of Snapshot resources contained within the // specified project. +// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/list func (r *SnapshotsService) List(project string) *SnapshotsListCall { c := &SnapshotsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -133722,7 +141298,7 @@ func (c *SnapshotsListCall) PageToken(pageToken string) *SnapshotsListCall { // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *SnapshotsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SnapshotsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -133765,7 +141341,7 @@ func (c *SnapshotsListCall) Header() http.Header { func (c *SnapshotsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -133865,7 +141441,7 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -133918,6 +141494,9 @@ type SnapshotsSetIamPolicyCall struct { // SetIamPolicy: Sets the access control policy on the specified // resource. Replaces any existing policy. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. func (r *SnapshotsService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *SnapshotsSetIamPolicyCall { c := &SnapshotsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -133953,7 +141532,7 @@ func (c *SnapshotsSetIamPolicyCall) Header() http.Header { func (c *SnapshotsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -134070,6 +141649,9 @@ type SnapshotsSetLabelsCall struct { // SetLabels: Sets the labels on a snapshot. To learn more about labels, // read the Labeling Resources documentation. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. func (r *SnapshotsService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *SnapshotsSetLabelsCall { c := &SnapshotsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -134105,7 +141687,7 @@ func (c *SnapshotsSetLabelsCall) Header() http.Header { func (c *SnapshotsSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -134222,6 +141804,9 @@ type SnapshotsTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. 
func (r *SnapshotsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *SnapshotsTestIamPermissionsCall { c := &SnapshotsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -134257,7 +141842,7 @@ func (c *SnapshotsTestIamPermissionsCall) Header() http.Header { func (c *SnapshotsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -134374,6 +141959,8 @@ type SslCertificatesAggregatedListCall struct { // AggregatedList: Retrieves the list of all SslCertificate resources, // regional and global, available to the specified project. +// +// - project: Name of the project scoping this request. func (r *SslCertificatesService) AggregatedList(project string) *SslCertificatesAggregatedListCall { c := &SslCertificatesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -134461,7 +142048,7 @@ func (c *SslCertificatesAggregatedListCall) PageToken(pageToken string) *SslCert // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *SslCertificatesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SslCertificatesAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -134504,7 +142091,7 @@ func (c *SslCertificatesAggregatedListCall) Header() http.Header { func (c *SslCertificatesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -134609,7 +142196,7 @@ func (c *SslCertificatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*S // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -134660,6 +142247,9 @@ type SslCertificatesDeleteCall struct { } // Delete: Deletes the specified SslCertificate resource. +// +// - project: Project ID for this request. +// - sslCertificate: Name of the SslCertificate resource to delete. 
func (r *SslCertificatesService) Delete(project string, sslCertificate string) *SslCertificatesDeleteCall { c := &SslCertificatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -134713,7 +142303,7 @@ func (c *SslCertificatesDeleteCall) Header() http.Header { func (c *SslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -134827,6 +142417,9 @@ type SslCertificatesGetCall struct { // Get: Returns the specified SslCertificate resource. Gets a list of // available SSL certificates by making a list() request. +// +// - project: Project ID for this request. +// - sslCertificate: Name of the SslCertificate resource to return. func (r *SslCertificatesService) Get(project string, sslCertificate string) *SslCertificatesGetCall { c := &SslCertificatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -134871,7 +142464,7 @@ func (c *SslCertificatesGetCall) Header() http.Header { func (c *SslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -134983,6 +142576,8 @@ type SslCertificatesInsertCall struct { // Insert: Creates a SslCertificate resource in the specified project // using the data included in the request. +// +// - project: Project ID for this request. func (r *SslCertificatesService) Insert(project string, sslcertificate *SslCertificate) *SslCertificatesInsertCall { c := &SslCertificatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -135036,7 +142631,7 @@ func (c *SslCertificatesInsertCall) Header() http.Header { func (c *SslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135148,6 +142743,8 @@ type SslCertificatesListCall struct { // List: Retrieves the list of SslCertificate resources available to the // specified project. +// +// - project: Project ID for this request. func (r *SslCertificatesService) List(project string) *SslCertificatesListCall { c := &SslCertificatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -135222,7 +142819,7 @@ func (c *SslCertificatesListCall) PageToken(pageToken string) *SslCertificatesLi // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *SslCertificatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SslCertificatesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -135265,7 +142862,7 @@ func (c *SslCertificatesListCall) Header() http.Header { func (c *SslCertificatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135365,7 +142962,7 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -135418,6 +143015,10 @@ type SslPoliciesDeleteCall struct { // Delete: Deletes the specified SSL policy. The SSL policy resource can // be deleted only if it is not in use by any TargetHttpsProxy or // TargetSslProxy resources. +// +// - project: Project ID for this request. +// - sslPolicy: Name of the SSL policy to delete. The name must be 1-63 +// characters long, and comply with RFC1035. func (r *SslPoliciesService) Delete(project string, sslPolicy string) *SslPoliciesDeleteCall { c := &SslPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -135471,7 +143072,7 @@ func (c *SslPoliciesDeleteCall) Header() http.Header { func (c *SslPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135584,6 +143185,10 @@ type SslPoliciesGetCall struct { // Get: Lists all of the ordered rules present in a single specified // policy. +// +// - project: Project ID for this request. +// - sslPolicy: Name of the SSL policy to update. The name must be 1-63 +// characters long, and comply with RFC1035. func (r *SslPoliciesService) Get(project string, sslPolicy string) *SslPoliciesGetCall { c := &SslPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -135628,7 +143233,7 @@ func (c *SslPoliciesGetCall) Header() http.Header { func (c *SslPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135739,6 +143344,8 @@ type SslPoliciesInsertCall struct { // Insert: Returns the specified SSL policy resource. Gets a list of // available SSL policies by making a list() request. +// +// - project: Project ID for this request. 
func (r *SslPoliciesService) Insert(project string, sslpolicy *SslPolicy) *SslPoliciesInsertCall { c := &SslPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -135792,7 +143399,7 @@ func (c *SslPoliciesInsertCall) Header() http.Header { func (c *SslPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135904,6 +143511,8 @@ type SslPoliciesListCall struct { // List: Lists all the SSL policies that have been configured for the // specified project. +// +// - project: Project ID for this request. func (r *SslPoliciesService) List(project string) *SslPoliciesListCall { c := &SslPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -135978,7 +143587,7 @@ func (c *SslPoliciesListCall) PageToken(pageToken string) *SslPoliciesListCall { // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *SslPoliciesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SslPoliciesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -136021,7 +143630,7 @@ func (c *SslPoliciesListCall) Header() http.Header { func (c *SslPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -136121,7 +143730,7 @@ func (c *SslPoliciesListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesList // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -136173,6 +143782,8 @@ type SslPoliciesListAvailableFeaturesCall struct { // ListAvailableFeatures: Lists all features that can be specified in // the SSL policy when using custom profile. +// +// - project: Project ID for this request. func (r *SslPoliciesService) ListAvailableFeatures(project string) *SslPoliciesListAvailableFeaturesCall { c := &SslPoliciesListAvailableFeaturesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -136247,7 +143858,7 @@ func (c *SslPoliciesListAvailableFeaturesCall) PageToken(pageToken string) *SslP // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *SslPoliciesListAvailableFeaturesCall) ReturnPartialSuccess(returnPartialSuccess bool) *SslPoliciesListAvailableFeaturesCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -136290,7 +143901,7 @@ func (c *SslPoliciesListAvailableFeaturesCall) Header() http.Header { func (c *SslPoliciesListAvailableFeaturesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -136392,7 +144003,7 @@ func (c *SslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -136424,6 +144035,10 @@ type SslPoliciesPatchCall struct { // Patch: Patches the specified SSL policy with the data included in the // request. +// +// - project: Project ID for this request. +// - sslPolicy: Name of the SSL policy to update. The name must be 1-63 +// characters long, and comply with RFC1035. func (r *SslPoliciesService) Patch(project string, sslPolicy string, sslpolicy *SslPolicy) *SslPoliciesPatchCall { c := &SslPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -136478,7 +144093,7 @@ func (c *SslPoliciesPatchCall) Header() http.Header { func (c *SslPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -136597,6 +144212,8 @@ type SubnetworksAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of subnetworks. +// +// - project: Project ID for this request. func (r *SubnetworksService) AggregatedList(project string) *SubnetworksAggregatedListCall { c := &SubnetworksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -136684,7 +144301,7 @@ func (c *SubnetworksAggregatedListCall) PageToken(pageToken string) *Subnetworks // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *SubnetworksAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SubnetworksAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -136727,7 +144344,7 @@ func (c *SubnetworksAggregatedListCall) Header() http.Header { func (c *SubnetworksAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -136832,7 +144449,7 @@ func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Subne // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -136884,6 +144501,10 @@ type SubnetworksDeleteCall struct { } // Delete: Deletes the specified subnetwork. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - subnetwork: Name of the Subnetwork resource to delete. func (r *SubnetworksService) Delete(project string, region string, subnetwork string) *SubnetworksDeleteCall { c := &SubnetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -136938,7 +144559,7 @@ func (c *SubnetworksDeleteCall) Header() http.Header { func (c *SubnetworksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -137062,6 +144683,10 @@ type SubnetworksExpandIpCidrRangeCall struct { // ExpandIpCidrRange: Expands the IP CIDR range of the subnetwork to a // specified value. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - subnetwork: Name of the Subnetwork resource to update. func (r *SubnetworksService) ExpandIpCidrRange(project string, region string, subnetwork string, subnetworksexpandipcidrrangerequest *SubnetworksExpandIpCidrRangeRequest) *SubnetworksExpandIpCidrRangeCall { c := &SubnetworksExpandIpCidrRangeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -137117,7 +144742,7 @@ func (c *SubnetworksExpandIpCidrRangeCall) Header() http.Header { func (c *SubnetworksExpandIpCidrRangeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -137249,6 +144874,10 @@ type SubnetworksGetCall struct { // Get: Returns the specified subnetwork. Gets a list of available // subnetworks list() request. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - subnetwork: Name of the Subnetwork resource to return. 
func (r *SubnetworksService) Get(project string, region string, subnetwork string) *SubnetworksGetCall { c := &SubnetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -137294,7 +144923,7 @@ func (c *SubnetworksGetCall) Header() http.Header { func (c *SubnetworksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -137417,6 +145046,10 @@ type SubnetworksGetIamPolicyCall struct { // GetIamPolicy: Gets the access control policy for a resource. May be // empty if no such policy or resource exists. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. func (r *SubnetworksService) GetIamPolicy(project string, region string, resource string) *SubnetworksGetIamPolicyCall { c := &SubnetworksGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -137469,7 +145102,7 @@ func (c *SubnetworksGetIamPolicyCall) Header() http.Header { func (c *SubnetworksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -137597,6 +145230,9 @@ type SubnetworksInsertCall struct { // Insert: Creates a subnetwork in the specified project using the data // included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *SubnetworksService) Insert(project string, region string, subnetwork *Subnetwork) *SubnetworksInsertCall { c := &SubnetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -137651,7 +145287,7 @@ func (c *SubnetworksInsertCall) Header() http.Header { func (c *SubnetworksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -137773,6 +145409,9 @@ type SubnetworksListCall struct { // List: Retrieves a list of subnetworks available to the specified // project. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *SubnetworksService) List(project string, region string) *SubnetworksListCall { c := &SubnetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -137848,7 +145487,7 @@ func (c *SubnetworksListCall) PageToken(pageToken string) *SubnetworksListCall { // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *SubnetworksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SubnetworksListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -137891,7 +145530,7 @@ func (c *SubnetworksListCall) Header() http.Header { func (c *SubnetworksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -138000,7 +145639,7 @@ func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -138052,6 +145691,8 @@ type SubnetworksListUsableCall struct { // ListUsable: Retrieves an aggregated list of all usable subnetworks in // the project. +// +// - project: Project ID for this request. func (r *SubnetworksService) ListUsable(project string) *SubnetworksListUsableCall { c := &SubnetworksListUsableCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -138126,7 +145767,7 @@ func (c *SubnetworksListUsableCall) PageToken(pageToken string) *SubnetworksList // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *SubnetworksListUsableCall) ReturnPartialSuccess(returnPartialSuccess bool) *SubnetworksListUsableCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -138169,7 +145810,7 @@ func (c *SubnetworksListUsableCall) Header() http.Header { func (c *SubnetworksListUsableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -138269,7 +145910,7 @@ func (c *SubnetworksListUsableCall) Do(opts ...googleapi.CallOption) (*UsableSub // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -138325,6 +145966,10 @@ type SubnetworksPatchCall struct { // request. Only certain fields can be updated with a patch request as // indicated in the field descriptions. You must specify the current // fingerprint of the subnetwork resource being patched. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - subnetwork: Name of the Subnetwork resource to patch. 
func (r *SubnetworksService) Patch(project string, region string, subnetwork string, subnetwork2 *Subnetwork) *SubnetworksPatchCall { c := &SubnetworksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -138395,7 +146040,7 @@ func (c *SubnetworksPatchCall) Header() http.Header { func (c *SubnetworksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -138533,6 +146178,10 @@ type SubnetworksSetIamPolicyCall struct { // SetIamPolicy: Sets the access control policy on the specified // resource. Replaces any existing policy. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. func (r *SubnetworksService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *SubnetworksSetIamPolicyCall { c := &SubnetworksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -138569,7 +146218,7 @@ func (c *SubnetworksSetIamPolicyCall) Header() http.Header { func (c *SubnetworksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -138697,6 +146346,10 @@ type SubnetworksSetPrivateIpGoogleAccessCall struct { // SetPrivateIpGoogleAccess: Set whether VMs in this subnet can access // Google services without assigning external IP addresses through // Private Google Access. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - subnetwork: Name of the Subnetwork resource. func (r *SubnetworksService) SetPrivateIpGoogleAccess(project string, region string, subnetwork string, subnetworkssetprivateipgoogleaccessrequest *SubnetworksSetPrivateIpGoogleAccessRequest) *SubnetworksSetPrivateIpGoogleAccessCall { c := &SubnetworksSetPrivateIpGoogleAccessCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -138752,7 +146405,7 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) Header() http.Header { func (c *SubnetworksSetPrivateIpGoogleAccessCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -138884,6 +146537,10 @@ type SubnetworksTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. 
func (r *SubnetworksService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *SubnetworksTestIamPermissionsCall { c := &SubnetworksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -138920,7 +146577,7 @@ func (c *SubnetworksTestIamPermissionsCall) Header() http.Header { func (c *SubnetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139045,6 +146702,9 @@ type TargetGrpcProxiesDeleteCall struct { } // Delete: Deletes the specified TargetGrpcProxy in the given scope +// +// - project: Project ID for this request. +// - targetGrpcProxy: Name of the TargetGrpcProxy resource to delete. func (r *TargetGrpcProxiesService) Delete(project string, targetGrpcProxy string) *TargetGrpcProxiesDeleteCall { c := &TargetGrpcProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -139098,7 +146758,7 @@ func (c *TargetGrpcProxiesDeleteCall) Header() http.Header { func (c *TargetGrpcProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139212,6 +146872,9 @@ type TargetGrpcProxiesGetCall struct { // Get: Returns the specified TargetGrpcProxy resource in the given // scope. +// +// - project: Project ID for this request. +// - targetGrpcProxy: Name of the TargetGrpcProxy resource to return. func (r *TargetGrpcProxiesService) Get(project string, targetGrpcProxy string) *TargetGrpcProxiesGetCall { c := &TargetGrpcProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -139256,7 +146919,7 @@ func (c *TargetGrpcProxiesGetCall) Header() http.Header { func (c *TargetGrpcProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139368,6 +147031,8 @@ type TargetGrpcProxiesInsertCall struct { // Insert: Creates a TargetGrpcProxy in the specified project in the // given scope using the parameters that are included in the request. +// +// - project: Project ID for this request. 
func (r *TargetGrpcProxiesService) Insert(project string, targetgrpcproxy *TargetGrpcProxy) *TargetGrpcProxiesInsertCall { c := &TargetGrpcProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -139421,7 +147086,7 @@ func (c *TargetGrpcProxiesInsertCall) Header() http.Header { func (c *TargetGrpcProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139532,6 +147197,8 @@ type TargetGrpcProxiesListCall struct { } // List: Lists the TargetGrpcProxies for a project in the given scope. +// +// - project: Project ID for this request. func (r *TargetGrpcProxiesService) List(project string) *TargetGrpcProxiesListCall { c := &TargetGrpcProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -139606,7 +147273,7 @@ func (c *TargetGrpcProxiesListCall) PageToken(pageToken string) *TargetGrpcProxi // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *TargetGrpcProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetGrpcProxiesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -139649,7 +147316,7 @@ func (c *TargetGrpcProxiesListCall) Header() http.Header { func (c *TargetGrpcProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139749,7 +147416,7 @@ func (c *TargetGrpcProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetGrp // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -139803,6 +147470,9 @@ type TargetGrpcProxiesPatchCall struct { // Patch: Patches the specified TargetGrpcProxy resource with the data // included in the request. This method supports PATCH semantics and // uses JSON merge patch format and processing rules. +// +// - project: Project ID for this request. +// - targetGrpcProxy: Name of the TargetGrpcProxy resource to patch. 
func (r *TargetGrpcProxiesService) Patch(project string, targetGrpcProxy string, targetgrpcproxy *TargetGrpcProxy) *TargetGrpcProxiesPatchCall { c := &TargetGrpcProxiesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -139857,7 +147527,7 @@ func (c *TargetGrpcProxiesPatchCall) Header() http.Header { func (c *TargetGrpcProxiesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139978,6 +147648,8 @@ type TargetHttpProxiesAggregatedListCall struct { // AggregatedList: Retrieves the list of all TargetHttpProxy resources, // regional and global, available to the specified project. +// +// - project: Name of the project scoping this request. func (r *TargetHttpProxiesService) AggregatedList(project string) *TargetHttpProxiesAggregatedListCall { c := &TargetHttpProxiesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -140065,7 +147737,7 @@ func (c *TargetHttpProxiesAggregatedListCall) PageToken(pageToken string) *Targe // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *TargetHttpProxiesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetHttpProxiesAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -140108,7 +147780,7 @@ func (c *TargetHttpProxiesAggregatedListCall) Header() http.Header { func (c *TargetHttpProxiesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -140213,7 +147885,7 @@ func (c *TargetHttpProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -140264,6 +147936,9 @@ type TargetHttpProxiesDeleteCall struct { } // Delete: Deletes the specified TargetHttpProxy resource. +// +// - project: Project ID for this request. +// - targetHttpProxy: Name of the TargetHttpProxy resource to delete. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/delete func (r *TargetHttpProxiesService) Delete(project string, targetHttpProxy string) *TargetHttpProxiesDeleteCall { c := &TargetHttpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -140318,7 +147993,7 @@ func (c *TargetHttpProxiesDeleteCall) Header() http.Header { func (c *TargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -140432,6 +148107,9 @@ type TargetHttpProxiesGetCall struct { // Get: Returns the specified TargetHttpProxy resource. Gets a list of // available target HTTP proxies by making a list() request. +// +// - project: Project ID for this request. +// - targetHttpProxy: Name of the TargetHttpProxy resource to return. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/get func (r *TargetHttpProxiesService) Get(project string, targetHttpProxy string) *TargetHttpProxiesGetCall { c := &TargetHttpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -140477,7 +148155,7 @@ func (c *TargetHttpProxiesGetCall) Header() http.Header { func (c *TargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -140589,6 +148267,8 @@ type TargetHttpProxiesInsertCall struct { // Insert: Creates a TargetHttpProxy resource in the specified project // using the data included in the request. +// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/insert func (r *TargetHttpProxiesService) Insert(project string, targethttpproxy *TargetHttpProxy) *TargetHttpProxiesInsertCall { c := &TargetHttpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -140643,7 +148323,7 @@ func (c *TargetHttpProxiesInsertCall) Header() http.Header { func (c *TargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -140755,6 +148435,8 @@ type TargetHttpProxiesListCall struct { // List: Retrieves the list of TargetHttpProxy resources available to // the specified project. +// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/list func (r *TargetHttpProxiesService) List(project string) *TargetHttpProxiesListCall { c := &TargetHttpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -140830,7 +148512,7 @@ func (c *TargetHttpProxiesListCall) PageToken(pageToken string) *TargetHttpProxi // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *TargetHttpProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetHttpProxiesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -140873,7 +148555,7 @@ func (c *TargetHttpProxiesListCall) Header() http.Header { func (c *TargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -140973,7 +148655,7 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -141028,6 +148710,9 @@ type TargetHttpProxiesPatchCall struct { // included in the request. This method supports PATCH semantics and // uses JSON merge patch format and processing rules. (== // suppress_warning http-rest-shadowed ==) +// +// - project: Project ID for this request. +// - targetHttpProxy: Name of the TargetHttpProxy resource to patch. func (r *TargetHttpProxiesService) Patch(project string, targetHttpProxy string, targethttpproxy *TargetHttpProxy) *TargetHttpProxiesPatchCall { c := &TargetHttpProxiesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -141082,7 +148767,7 @@ func (c *TargetHttpProxiesPatchCall) Header() http.Header { func (c *TargetHttpProxiesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -141203,6 +148888,9 @@ type TargetHttpProxiesSetUrlMapCall struct { } // SetUrlMap: Changes the URL map for TargetHttpProxy. +// +// - project: Project ID for this request. +// - targetHttpProxy: Name of the TargetHttpProxy to set a URL map for. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/setUrlMap func (r *TargetHttpProxiesService) SetUrlMap(project string, targetHttpProxy string, urlmapreference *UrlMapReference) *TargetHttpProxiesSetUrlMapCall { c := &TargetHttpProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -141258,7 +148946,7 @@ func (c *TargetHttpProxiesSetUrlMapCall) Header() http.Header { func (c *TargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -141379,6 +149067,8 @@ type TargetHttpsProxiesAggregatedListCall struct { // AggregatedList: Retrieves the list of all TargetHttpsProxy resources, // regional and global, available to the specified project. +// +// - project: Name of the project scoping this request. 
func (r *TargetHttpsProxiesService) AggregatedList(project string) *TargetHttpsProxiesAggregatedListCall { c := &TargetHttpsProxiesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -141466,7 +149156,7 @@ func (c *TargetHttpsProxiesAggregatedListCall) PageToken(pageToken string) *Targ // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *TargetHttpsProxiesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetHttpsProxiesAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -141509,7 +149199,7 @@ func (c *TargetHttpsProxiesAggregatedListCall) Header() http.Header { func (c *TargetHttpsProxiesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -141614,7 +149304,7 @@ func (c *TargetHttpsProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -141665,6 +149355,9 @@ type TargetHttpsProxiesDeleteCall struct { } // Delete: Deletes the specified TargetHttpsProxy resource. +// +// - project: Project ID for this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource to delete. func (r *TargetHttpsProxiesService) Delete(project string, targetHttpsProxy string) *TargetHttpsProxiesDeleteCall { c := &TargetHttpsProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -141718,7 +149411,7 @@ func (c *TargetHttpsProxiesDeleteCall) Header() http.Header { func (c *TargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -141832,6 +149525,9 @@ type TargetHttpsProxiesGetCall struct { // Get: Returns the specified TargetHttpsProxy resource. Gets a list of // available target HTTPS proxies by making a list() request. +// +// - project: Project ID for this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource to return. 
func (r *TargetHttpsProxiesService) Get(project string, targetHttpsProxy string) *TargetHttpsProxiesGetCall { c := &TargetHttpsProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -141876,7 +149572,7 @@ func (c *TargetHttpsProxiesGetCall) Header() http.Header { func (c *TargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -141988,6 +149684,8 @@ type TargetHttpsProxiesInsertCall struct { // Insert: Creates a TargetHttpsProxy resource in the specified project // using the data included in the request. +// +// - project: Project ID for this request. func (r *TargetHttpsProxiesService) Insert(project string, targethttpsproxy *TargetHttpsProxy) *TargetHttpsProxiesInsertCall { c := &TargetHttpsProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -142041,7 +149739,7 @@ func (c *TargetHttpsProxiesInsertCall) Header() http.Header { func (c *TargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -142153,6 +149851,8 @@ type TargetHttpsProxiesListCall struct { // List: Retrieves the list of TargetHttpsProxy resources available to // the specified project. +// +// - project: Project ID for this request. func (r *TargetHttpsProxiesService) List(project string) *TargetHttpsProxiesListCall { c := &TargetHttpsProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -142227,7 +149927,7 @@ func (c *TargetHttpsProxiesListCall) PageToken(pageToken string) *TargetHttpsPro // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *TargetHttpsProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetHttpsProxiesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -142270,7 +149970,7 @@ func (c *TargetHttpsProxiesListCall) Header() http.Header { func (c *TargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -142370,7 +150070,7 @@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -142425,6 +150125,9 @@ type TargetHttpsProxiesPatchCall struct { // included in the request. 
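The List call in this hunk exposes the returnPartialSuccess flag and page tokens through the ReturnPartialSuccess and PageToken setters shown above. A minimal paging sketch, assuming the package's usual NewService constructor and the standard Items/NextPageToken fields on the list response:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // assumed constructor
	if err != nil {
		log.Fatal(err)
	}
	pageToken := ""
	for {
		call := svc.TargetHttpsProxies.List("example-project").
			ReturnPartialSuccess(true). // opt in to partial results on failure
			PageToken(pageToken)
		resp, err := call.Do()
		if err != nil {
			log.Fatal(err)
		}
		for _, proxy := range resp.Items { // Items/NextPageToken assumed standard list fields
			fmt.Println(proxy.Name)
		}
		if resp.NextPageToken == "" {
			break
		}
		pageToken = resp.NextPageToken
	}
}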
This method supports PATCH semantics and // uses JSON merge patch format and processing rules. (== // suppress_warning http-rest-shadowed ==) +// +// - project: Project ID for this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource to patch. func (r *TargetHttpsProxiesService) Patch(project string, targetHttpsProxy string, targethttpsproxy *TargetHttpsProxy) *TargetHttpsProxiesPatchCall { c := &TargetHttpsProxiesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -142479,7 +150182,7 @@ func (c *TargetHttpsProxiesPatchCall) Header() http.Header { func (c *TargetHttpsProxiesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -142600,6 +150303,10 @@ type TargetHttpsProxiesSetQuicOverrideCall struct { } // SetQuicOverride: Sets the QUIC override policy for TargetHttpsProxy. +// +// - project: Project ID for this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource to set the +// QUIC override policy for. The name should conform to RFC1035. func (r *TargetHttpsProxiesService) SetQuicOverride(project string, targetHttpsProxy string, targethttpsproxiessetquicoverriderequest *TargetHttpsProxiesSetQuicOverrideRequest) *TargetHttpsProxiesSetQuicOverrideCall { c := &TargetHttpsProxiesSetQuicOverrideCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -142654,7 +150361,7 @@ func (c *TargetHttpsProxiesSetQuicOverrideCall) Header() http.Header { func (c *TargetHttpsProxiesSetQuicOverrideCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -142774,6 +150481,10 @@ type TargetHttpsProxiesSetSslCertificatesCall struct { } // SetSslCertificates: Replaces SslCertificates for TargetHttpsProxy. +// +// - project: Project ID for this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource to set an +// SslCertificates resource for. func (r *TargetHttpsProxiesService) SetSslCertificates(project string, targetHttpsProxy string, targethttpsproxiessetsslcertificatesrequest *TargetHttpsProxiesSetSslCertificatesRequest) *TargetHttpsProxiesSetSslCertificatesCall { c := &TargetHttpsProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -142828,7 +150539,7 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Header() http.Header { func (c *TargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -142953,6 +150664,11 @@ type TargetHttpsProxiesSetSslPolicyCall struct { // affects connections between clients and the HTTPS proxy load // balancer. They do not affect the connection between the load balancer // and the backends. +// +// - project: Project ID for this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource whose SSL +// policy is to be set. 
The name must be 1-63 characters long, and +// comply with RFC1035. func (r *TargetHttpsProxiesService) SetSslPolicy(project string, targetHttpsProxy string, sslpolicyreference *SslPolicyReference) *TargetHttpsProxiesSetSslPolicyCall { c := &TargetHttpsProxiesSetSslPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -143007,7 +150723,7 @@ func (c *TargetHttpsProxiesSetSslPolicyCall) Header() http.Header { func (c *TargetHttpsProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -143127,6 +150843,10 @@ type TargetHttpsProxiesSetUrlMapCall struct { } // SetUrlMap: Changes the URL map for TargetHttpsProxy. +// +// - project: Project ID for this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource whose URL +// map is to be set. func (r *TargetHttpsProxiesService) SetUrlMap(project string, targetHttpsProxy string, urlmapreference *UrlMapReference) *TargetHttpsProxiesSetUrlMapCall { c := &TargetHttpsProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -143181,7 +150901,7 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Header() http.Header { func (c *TargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -143301,6 +151021,8 @@ type TargetInstancesAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of target instances. +// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/aggregatedList func (r *TargetInstancesService) AggregatedList(project string) *TargetInstancesAggregatedListCall { c := &TargetInstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -143389,7 +151111,7 @@ func (c *TargetInstancesAggregatedListCall) PageToken(pageToken string) *TargetI // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *TargetInstancesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetInstancesAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -143432,7 +151154,7 @@ func (c *TargetInstancesAggregatedListCall) Header() http.Header { func (c *TargetInstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -143537,7 +151259,7 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
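SetSslPolicy, whose expanded doc comment above spells out the RFC1035 naming constraint, takes an SslPolicyReference body and only affects client-to-load-balancer connections. A sketch, with the NewService constructor and the SslPolicy field name assumed:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // assumed constructor
	if err != nil {
		log.Fatal(err)
	}
	// Attach an SSL policy to a target HTTPS proxy.
	ref := &compute.SslPolicyReference{
		SslPolicy: "global/sslPolicies/modern-tls", // field name assumed
	}
	op, err := svc.TargetHttpsProxies.SetSslPolicy("example-project", "example-https-proxy", ref).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("started operation:", op.Name)
}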
The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -143589,6 +151311,10 @@ type TargetInstancesDeleteCall struct { } // Delete: Deletes the specified TargetInstance resource. +// +// - project: Project ID for this request. +// - targetInstance: Name of the TargetInstance resource to delete. +// - zone: Name of the zone scoping this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/delete func (r *TargetInstancesService) Delete(project string, zone string, targetInstance string) *TargetInstancesDeleteCall { c := &TargetInstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -143644,7 +151370,7 @@ func (c *TargetInstancesDeleteCall) Header() http.Header { func (c *TargetInstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -143768,6 +151494,10 @@ type TargetInstancesGetCall struct { // Get: Returns the specified TargetInstance resource. Gets a list of // available target instances by making a list() request. +// +// - project: Project ID for this request. +// - targetInstance: Name of the TargetInstance resource to return. +// - zone: Name of the zone scoping this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/get func (r *TargetInstancesService) Get(project string, zone string, targetInstance string) *TargetInstancesGetCall { c := &TargetInstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -143814,7 +151544,7 @@ func (c *TargetInstancesGetCall) Header() http.Header { func (c *TargetInstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -143936,6 +151666,9 @@ type TargetInstancesInsertCall struct { // Insert: Creates a TargetInstance resource in the specified project // and zone using the data included in the request. +// +// - project: Project ID for this request. +// - zone: Name of the zone scoping this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/insert func (r *TargetInstancesService) Insert(project string, zone string, targetinstance *TargetInstance) *TargetInstancesInsertCall { c := &TargetInstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -143991,7 +151724,7 @@ func (c *TargetInstancesInsertCall) Header() http.Header { func (c *TargetInstancesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -144113,6 +151846,9 @@ type TargetInstancesListCall struct { // List: Retrieves a list of TargetInstance resources available to the // specified project and zone. 
+// +// - project: Project ID for this request. +// - zone: Name of the zone scoping this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/list func (r *TargetInstancesService) List(project string, zone string) *TargetInstancesListCall { c := &TargetInstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -144189,7 +151925,7 @@ func (c *TargetInstancesListCall) PageToken(pageToken string) *TargetInstancesLi // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *TargetInstancesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetInstancesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -144232,7 +151968,7 @@ func (c *TargetInstancesListCall) Header() http.Header { func (c *TargetInstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -144334,7 +152070,7 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // }, @@ -144394,6 +152130,10 @@ type TargetPoolsAddHealthCheckCall struct { } // AddHealthCheck: Adds health check URLs to a target pool. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetPool: Name of the target pool to add a health check to. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/addHealthCheck func (r *TargetPoolsService) AddHealthCheck(project string, region string, targetPool string, targetpoolsaddhealthcheckrequest *TargetPoolsAddHealthCheckRequest) *TargetPoolsAddHealthCheckCall { c := &TargetPoolsAddHealthCheckCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -144450,7 +152190,7 @@ func (c *TargetPoolsAddHealthCheckCall) Header() http.Header { func (c *TargetPoolsAddHealthCheckCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -144581,6 +152321,10 @@ type TargetPoolsAddInstanceCall struct { } // AddInstance: Adds an instance to a target pool. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetPool: Name of the TargetPool resource to add instances to. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/addInstance func (r *TargetPoolsService) AddInstance(project string, region string, targetPool string, targetpoolsaddinstancerequest *TargetPoolsAddInstanceRequest) *TargetPoolsAddInstanceCall { c := &TargetPoolsAddInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -144637,7 +152381,7 @@ func (c *TargetPoolsAddInstanceCall) Header() http.Header { func (c *TargetPoolsAddInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -144766,6 +152510,8 @@ type TargetPoolsAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of target pools. +// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/aggregatedList func (r *TargetPoolsService) AggregatedList(project string) *TargetPoolsAggregatedListCall { c := &TargetPoolsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -144854,7 +152600,7 @@ func (c *TargetPoolsAggregatedListCall) PageToken(pageToken string) *TargetPools // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *TargetPoolsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetPoolsAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -144897,7 +152643,7 @@ func (c *TargetPoolsAggregatedListCall) Header() http.Header { func (c *TargetPoolsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -145002,7 +152748,7 @@ func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Targe // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -145054,6 +152800,10 @@ type TargetPoolsDeleteCall struct { } // Delete: Deletes the specified target pool. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetPool: Name of the TargetPool resource to delete. 
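AddInstance, documented in this hunk, wraps the instance URLs in a TargetPoolsAddInstanceRequest and is scoped by project and region. A sketch; the request/reference field names and the NewService constructor are assumptions:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // assumed constructor
	if err != nil {
		log.Fatal(err)
	}
	req := &compute.TargetPoolsAddInstanceRequest{
		Instances: []*compute.InstanceReference{ // field names assumed
			{Instance: "zones/us-central1-a/instances/example-instance"},
		},
	}
	op, err := svc.TargetPools.AddInstance("example-project", "us-central1", "example-pool", req).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("started operation:", op.Name)
}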
// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/delete func (r *TargetPoolsService) Delete(project string, region string, targetPool string) *TargetPoolsDeleteCall { c := &TargetPoolsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -145109,7 +152859,7 @@ func (c *TargetPoolsDeleteCall) Header() http.Header { func (c *TargetPoolsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -145233,6 +152983,10 @@ type TargetPoolsGetCall struct { // Get: Returns the specified target pool. Gets a list of available // target pools by making a list() request. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetPool: Name of the TargetPool resource to return. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/get func (r *TargetPoolsService) Get(project string, region string, targetPool string) *TargetPoolsGetCall { c := &TargetPoolsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -145279,7 +153033,7 @@ func (c *TargetPoolsGetCall) Header() http.Header { func (c *TargetPoolsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -145402,6 +153156,11 @@ type TargetPoolsGetHealthCall struct { // GetHealth: Gets the most recent health check results for each IP for // the instance that is referenced by the given target pool. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetPool: Name of the TargetPool resource to which the queried +// instance belongs. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/getHealth func (r *TargetPoolsService) GetHealth(project string, region string, targetPool string, instancereference *InstanceReference) *TargetPoolsGetHealthCall { c := &TargetPoolsGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -145439,7 +153198,7 @@ func (c *TargetPoolsGetHealthCall) Header() http.Header { func (c *TargetPoolsGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -145566,6 +153325,9 @@ type TargetPoolsInsertCall struct { // Insert: Creates a target pool in the specified project and region // using the data included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/insert func (r *TargetPoolsService) Insert(project string, region string, targetpool *TargetPool) *TargetPoolsInsertCall { c := &TargetPoolsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -145621,7 +153383,7 @@ func (c *TargetPoolsInsertCall) Header() http.Header { func (c *TargetPoolsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -145743,6 +153505,9 @@ type TargetPoolsListCall struct { // List: Retrieves a list of target pools available to the specified // project and region. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/list func (r *TargetPoolsService) List(project string, region string) *TargetPoolsListCall { c := &TargetPoolsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -145819,7 +153584,7 @@ func (c *TargetPoolsListCall) PageToken(pageToken string) *TargetPoolsListCall { // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *TargetPoolsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetPoolsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -145862,7 +153627,7 @@ func (c *TargetPoolsListCall) Header() http.Header { func (c *TargetPoolsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -145971,7 +153736,7 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -146024,6 +153789,10 @@ type TargetPoolsRemoveHealthCheckCall struct { } // RemoveHealthCheck: Removes health check URL from a target pool. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. +// - targetPool: Name of the target pool to remove health checks from. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/removeHealthCheck func (r *TargetPoolsService) RemoveHealthCheck(project string, region string, targetPool string, targetpoolsremovehealthcheckrequest *TargetPoolsRemoveHealthCheckRequest) *TargetPoolsRemoveHealthCheckCall { c := &TargetPoolsRemoveHealthCheckCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -146080,7 +153849,7 @@ func (c *TargetPoolsRemoveHealthCheckCall) Header() http.Header { func (c *TargetPoolsRemoveHealthCheckCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -146211,6 +153980,11 @@ type TargetPoolsRemoveInstanceCall struct { } // RemoveInstance: Removes instance URL from a target pool. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetPool: Name of the TargetPool resource to remove instances +// from. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/removeInstance func (r *TargetPoolsService) RemoveInstance(project string, region string, targetPool string, targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest) *TargetPoolsRemoveInstanceCall { c := &TargetPoolsRemoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -146267,7 +154041,7 @@ func (c *TargetPoolsRemoveInstanceCall) Header() http.Header { func (c *TargetPoolsRemoveInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -146398,6 +154172,11 @@ type TargetPoolsSetBackupCall struct { } // SetBackup: Changes a backup target pool's configurations. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetPool: Name of the TargetPool resource to set a backup pool +// for. // For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/setBackup func (r *TargetPoolsService) SetBackup(project string, region string, targetPool string, targetreference *TargetReference) *TargetPoolsSetBackupCall { c := &TargetPoolsSetBackupCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -146461,7 +154240,7 @@ func (c *TargetPoolsSetBackupCall) Header() http.Header { func (c *TargetPoolsSetBackupCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -146596,6 +154375,9 @@ type TargetSslProxiesDeleteCall struct { } // Delete: Deletes the specified TargetSslProxy resource. +// +// - project: Project ID for this request. +// - targetSslProxy: Name of the TargetSslProxy resource to delete. 
func (r *TargetSslProxiesService) Delete(project string, targetSslProxy string) *TargetSslProxiesDeleteCall { c := &TargetSslProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -146649,7 +154431,7 @@ func (c *TargetSslProxiesDeleteCall) Header() http.Header { func (c *TargetSslProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -146763,6 +154545,9 @@ type TargetSslProxiesGetCall struct { // Get: Returns the specified TargetSslProxy resource. Gets a list of // available target SSL proxies by making a list() request. +// +// - project: Project ID for this request. +// - targetSslProxy: Name of the TargetSslProxy resource to return. func (r *TargetSslProxiesService) Get(project string, targetSslProxy string) *TargetSslProxiesGetCall { c := &TargetSslProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -146807,7 +154592,7 @@ func (c *TargetSslProxiesGetCall) Header() http.Header { func (c *TargetSslProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -146919,6 +154704,8 @@ type TargetSslProxiesInsertCall struct { // Insert: Creates a TargetSslProxy resource in the specified project // using the data included in the request. +// +// - project: Project ID for this request. func (r *TargetSslProxiesService) Insert(project string, targetsslproxy *TargetSslProxy) *TargetSslProxiesInsertCall { c := &TargetSslProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -146972,7 +154759,7 @@ func (c *TargetSslProxiesInsertCall) Header() http.Header { func (c *TargetSslProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -147084,6 +154871,8 @@ type TargetSslProxiesListCall struct { // List: Retrieves the list of TargetSslProxy resources available to the // specified project. +// +// - project: Project ID for this request. func (r *TargetSslProxiesService) List(project string) *TargetSslProxiesListCall { c := &TargetSslProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -147158,7 +154947,7 @@ func (c *TargetSslProxiesListCall) PageToken(pageToken string) *TargetSslProxies // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *TargetSslProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetSslProxiesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -147201,7 +154990,7 @@ func (c *TargetSslProxiesListCall) Header() http.Header { func (c *TargetSslProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -147301,7 +155090,7 @@ func (c *TargetSslProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetSslP // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -147353,6 +155142,10 @@ type TargetSslProxiesSetBackendServiceCall struct { } // SetBackendService: Changes the BackendService for TargetSslProxy. +// +// - project: Project ID for this request. +// - targetSslProxy: Name of the TargetSslProxy resource whose +// BackendService resource is to be set. func (r *TargetSslProxiesService) SetBackendService(project string, targetSslProxy string, targetsslproxiessetbackendservicerequest *TargetSslProxiesSetBackendServiceRequest) *TargetSslProxiesSetBackendServiceCall { c := &TargetSslProxiesSetBackendServiceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -147407,7 +155200,7 @@ func (c *TargetSslProxiesSetBackendServiceCall) Header() http.Header { func (c *TargetSslProxiesSetBackendServiceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -147528,6 +155321,10 @@ type TargetSslProxiesSetProxyHeaderCall struct { } // SetProxyHeader: Changes the ProxyHeaderType for TargetSslProxy. +// +// - project: Project ID for this request. +// - targetSslProxy: Name of the TargetSslProxy resource whose +// ProxyHeader is to be set. func (r *TargetSslProxiesService) SetProxyHeader(project string, targetSslProxy string, targetsslproxiessetproxyheaderrequest *TargetSslProxiesSetProxyHeaderRequest) *TargetSslProxiesSetProxyHeaderCall { c := &TargetSslProxiesSetProxyHeaderCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -147582,7 +155379,7 @@ func (c *TargetSslProxiesSetProxyHeaderCall) Header() http.Header { func (c *TargetSslProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -147703,6 +155500,10 @@ type TargetSslProxiesSetSslCertificatesCall struct { } // SetSslCertificates: Changes SslCertificates for TargetSslProxy. +// +// - project: Project ID for this request. 
+// - targetSslProxy: Name of the TargetSslProxy resource whose +// SslCertificate resource is to be set. func (r *TargetSslProxiesService) SetSslCertificates(project string, targetSslProxy string, targetsslproxiessetsslcertificatesrequest *TargetSslProxiesSetSslCertificatesRequest) *TargetSslProxiesSetSslCertificatesCall { c := &TargetSslProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -147757,7 +155558,7 @@ func (c *TargetSslProxiesSetSslCertificatesCall) Header() http.Header { func (c *TargetSslProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -147881,6 +155682,11 @@ type TargetSslProxiesSetSslPolicyCall struct { // specifies the server-side support for SSL features. This affects // connections between clients and the SSL proxy load balancer. They do // not affect the connection between the load balancer and the backends. +// +// - project: Project ID for this request. +// - targetSslProxy: Name of the TargetSslProxy resource whose SSL +// policy is to be set. The name must be 1-63 characters long, and +// comply with RFC1035. func (r *TargetSslProxiesService) SetSslPolicy(project string, targetSslProxy string, sslpolicyreference *SslPolicyReference) *TargetSslProxiesSetSslPolicyCall { c := &TargetSslProxiesSetSslPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -147935,7 +155741,7 @@ func (c *TargetSslProxiesSetSslPolicyCall) Header() http.Header { func (c *TargetSslProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -148054,6 +155860,9 @@ type TargetTcpProxiesDeleteCall struct { } // Delete: Deletes the specified TargetTcpProxy resource. +// +// - project: Project ID for this request. +// - targetTcpProxy: Name of the TargetTcpProxy resource to delete. func (r *TargetTcpProxiesService) Delete(project string, targetTcpProxy string) *TargetTcpProxiesDeleteCall { c := &TargetTcpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -148107,7 +155916,7 @@ func (c *TargetTcpProxiesDeleteCall) Header() http.Header { func (c *TargetTcpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -148221,6 +156030,9 @@ type TargetTcpProxiesGetCall struct { // Get: Returns the specified TargetTcpProxy resource. Gets a list of // available target TCP proxies by making a list() request. +// +// - project: Project ID for this request. +// - targetTcpProxy: Name of the TargetTcpProxy resource to return. 
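SetSslCertificates replaces the certificate list on a TargetSslProxy in a single call, as the doc comment above describes. A sketch, assuming the NewService constructor and the SslCertificates field on the request body:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // assumed constructor
	if err != nil {
		log.Fatal(err)
	}
	req := &compute.TargetSslProxiesSetSslCertificatesRequest{
		SslCertificates: []string{ // field name assumed; certificate URLs
			"global/sslCertificates/example-cert",
		},
	}
	op, err := svc.TargetSslProxies.SetSslCertificates("example-project", "example-ssl-proxy", req).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("started operation:", op.Name)
}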
func (r *TargetTcpProxiesService) Get(project string, targetTcpProxy string) *TargetTcpProxiesGetCall { c := &TargetTcpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -148265,7 +156077,7 @@ func (c *TargetTcpProxiesGetCall) Header() http.Header { func (c *TargetTcpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -148377,6 +156189,8 @@ type TargetTcpProxiesInsertCall struct { // Insert: Creates a TargetTcpProxy resource in the specified project // using the data included in the request. +// +// - project: Project ID for this request. func (r *TargetTcpProxiesService) Insert(project string, targettcpproxy *TargetTcpProxy) *TargetTcpProxiesInsertCall { c := &TargetTcpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -148430,7 +156244,7 @@ func (c *TargetTcpProxiesInsertCall) Header() http.Header { func (c *TargetTcpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -148542,6 +156356,8 @@ type TargetTcpProxiesListCall struct { // List: Retrieves the list of TargetTcpProxy resources available to the // specified project. +// +// - project: Project ID for this request. func (r *TargetTcpProxiesService) List(project string) *TargetTcpProxiesListCall { c := &TargetTcpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -148616,7 +156432,7 @@ func (c *TargetTcpProxiesListCall) PageToken(pageToken string) *TargetTcpProxies // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *TargetTcpProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetTcpProxiesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -148659,7 +156475,7 @@ func (c *TargetTcpProxiesListCall) Header() http.Header { func (c *TargetTcpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -148759,7 +156575,7 @@ func (c *TargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpP // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -148811,6 +156627,10 @@ type TargetTcpProxiesSetBackendServiceCall struct { } // SetBackendService: Changes the BackendService for TargetTcpProxy. 
+// +// - project: Project ID for this request. +// - targetTcpProxy: Name of the TargetTcpProxy resource whose +// BackendService resource is to be set. func (r *TargetTcpProxiesService) SetBackendService(project string, targetTcpProxy string, targettcpproxiessetbackendservicerequest *TargetTcpProxiesSetBackendServiceRequest) *TargetTcpProxiesSetBackendServiceCall { c := &TargetTcpProxiesSetBackendServiceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -148865,7 +156685,7 @@ func (c *TargetTcpProxiesSetBackendServiceCall) Header() http.Header { func (c *TargetTcpProxiesSetBackendServiceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -148986,6 +156806,10 @@ type TargetTcpProxiesSetProxyHeaderCall struct { } // SetProxyHeader: Changes the ProxyHeaderType for TargetTcpProxy. +// +// - project: Project ID for this request. +// - targetTcpProxy: Name of the TargetTcpProxy resource whose +// ProxyHeader is to be set. func (r *TargetTcpProxiesService) SetProxyHeader(project string, targetTcpProxy string, targettcpproxiessetproxyheaderrequest *TargetTcpProxiesSetProxyHeaderRequest) *TargetTcpProxiesSetProxyHeaderCall { c := &TargetTcpProxiesSetProxyHeaderCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -149040,7 +156864,7 @@ func (c *TargetTcpProxiesSetProxyHeaderCall) Header() http.Header { func (c *TargetTcpProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -149160,6 +156984,8 @@ type TargetVpnGatewaysAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of target VPN gateways. +// +// - project: Project ID for this request. func (r *TargetVpnGatewaysService) AggregatedList(project string) *TargetVpnGatewaysAggregatedListCall { c := &TargetVpnGatewaysAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -149247,7 +157073,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) PageToken(pageToken string) *Targe // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *TargetVpnGatewaysAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetVpnGatewaysAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -149290,7 +157116,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) Header() http.Header { func (c *TargetVpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -149395,7 +157221,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -149447,6 +157273,10 @@ type TargetVpnGatewaysDeleteCall struct { } // Delete: Deletes the specified target VPN gateway. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. +// - targetVpnGateway: Name of the target VPN gateway to delete. func (r *TargetVpnGatewaysService) Delete(project string, region string, targetVpnGateway string) *TargetVpnGatewaysDeleteCall { c := &TargetVpnGatewaysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -149501,7 +157331,7 @@ func (c *TargetVpnGatewaysDeleteCall) Header() http.Header { func (c *TargetVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -149625,6 +157455,10 @@ type TargetVpnGatewaysGetCall struct { // Get: Returns the specified target VPN gateway. Gets a list of // available target VPN gateways by making a list() request. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. +// - targetVpnGateway: Name of the target VPN gateway to return. func (r *TargetVpnGatewaysService) Get(project string, region string, targetVpnGateway string) *TargetVpnGatewaysGetCall { c := &TargetVpnGatewaysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -149670,7 +157504,7 @@ func (c *TargetVpnGatewaysGetCall) Header() http.Header { func (c *TargetVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -149792,6 +157626,9 @@ type TargetVpnGatewaysInsertCall struct { // Insert: Creates a target VPN gateway in the specified project and // region using the data included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. 
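Get on TargetVpnGateways is a plain read; the parameter list added above shows the project/region/gateway ordering. A sketch, assuming the NewService constructor and the Name/Status fields on the returned gateway:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // assumed constructor
	if err != nil {
		log.Fatal(err)
	}
	gw, err := svc.TargetVpnGateways.Get("example-project", "us-central1", "example-gateway").Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("gateway %s status: %s\n", gw.Name, gw.Status) // field names assumed
}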
func (r *TargetVpnGatewaysService) Insert(project string, region string, targetvpngateway *TargetVpnGateway) *TargetVpnGatewaysInsertCall { c := &TargetVpnGatewaysInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -149846,7 +157683,7 @@ func (c *TargetVpnGatewaysInsertCall) Header() http.Header { func (c *TargetVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -149968,6 +157805,9 @@ type TargetVpnGatewaysListCall struct { // List: Retrieves a list of target VPN gateways available to the // specified project and region. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. func (r *TargetVpnGatewaysService) List(project string, region string) *TargetVpnGatewaysListCall { c := &TargetVpnGatewaysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -150043,7 +157883,7 @@ func (c *TargetVpnGatewaysListCall) PageToken(pageToken string) *TargetVpnGatewa // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *TargetVpnGatewaysListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetVpnGatewaysListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -150086,7 +157926,7 @@ func (c *TargetVpnGatewaysListCall) Header() http.Header { func (c *TargetVpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -150195,7 +158035,7 @@ func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpn // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -150247,6 +158087,8 @@ type UrlMapsAggregatedListCall struct { // AggregatedList: Retrieves the list of all UrlMap resources, regional // and global, available to the specified project. +// +// - project: Name of the project scoping this request. func (r *UrlMapsService) AggregatedList(project string) *UrlMapsAggregatedListCall { c := &UrlMapsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -150334,7 +158176,7 @@ func (c *UrlMapsAggregatedListCall) PageToken(pageToken string) *UrlMapsAggregat // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *UrlMapsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *UrlMapsAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -150377,7 +158219,7 @@ func (c *UrlMapsAggregatedListCall) Header() http.Header { func (c *UrlMapsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -150482,7 +158324,7 @@ func (c *UrlMapsAggregatedListCall) Do(opts ...googleapi.CallOption) (*UrlMapsAg // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -150533,6 +158375,9 @@ type UrlMapsDeleteCall struct { } // Delete: Deletes the specified UrlMap resource. +// +// - project: Project ID for this request. +// - urlMap: Name of the UrlMap resource to delete. // For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/delete func (r *UrlMapsService) Delete(project string, urlMap string) *UrlMapsDeleteCall { c := &UrlMapsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -150587,7 +158432,7 @@ func (c *UrlMapsDeleteCall) Header() http.Header { func (c *UrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -150701,6 +158546,9 @@ type UrlMapsGetCall struct { // Get: Returns the specified UrlMap resource. Gets a list of available // URL maps by making a list() request. +// +// - project: Project ID for this request. +// - urlMap: Name of the UrlMap resource to return. // For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/get func (r *UrlMapsService) Get(project string, urlMap string) *UrlMapsGetCall { c := &UrlMapsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -150746,7 +158594,7 @@ func (c *UrlMapsGetCall) Header() http.Header { func (c *UrlMapsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -150858,6 +158706,8 @@ type UrlMapsInsertCall struct { // Insert: Creates a UrlMap resource in the specified project using the // data included in the request. +// +// - project: Project ID for this request. 
// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/insert func (r *UrlMapsService) Insert(project string, urlmap *UrlMap) *UrlMapsInsertCall { c := &UrlMapsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -150912,7 +158762,7 @@ func (c *UrlMapsInsertCall) Header() http.Header { func (c *UrlMapsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -151028,6 +158878,9 @@ type UrlMapsInvalidateCacheCall struct { // // For more information, see Invalidating cached content // (/cdn/docs/invalidating-cached-content). +// +// - project: Project ID for this request. +// - urlMap: Name of the UrlMap scoping this request. func (r *UrlMapsService) InvalidateCache(project string, urlMap string, cacheinvalidationrule *CacheInvalidationRule) *UrlMapsInvalidateCacheCall { c := &UrlMapsInvalidateCacheCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -151082,7 +158935,7 @@ func (c *UrlMapsInvalidateCacheCall) Header() http.Header { func (c *UrlMapsInvalidateCacheCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -151203,6 +159056,8 @@ type UrlMapsListCall struct { // List: Retrieves the list of UrlMap resources available to the // specified project. +// +// - project: Project ID for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/list func (r *UrlMapsService) List(project string) *UrlMapsListCall { c := &UrlMapsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -151278,7 +159133,7 @@ func (c *UrlMapsListCall) PageToken(pageToken string) *UrlMapsListCall { // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *UrlMapsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *UrlMapsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -151321,7 +159176,7 @@ func (c *UrlMapsListCall) Header() http.Header { func (c *UrlMapsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -151421,7 +159276,7 @@ func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
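InvalidateCache, documented in this hunk, takes a CacheInvalidationRule naming what to purge from Cloud CDN caches fronted by the URL map. A sketch, assuming the NewService constructor and the Path field on the rule:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // assumed constructor
	if err != nil {
		log.Fatal(err)
	}
	// Purge one path pattern from the CDN cache.
	rule := &compute.CacheInvalidationRule{
		Path: "/images/*", // field name assumed
	}
	op, err := svc.UrlMaps.InvalidateCache("example-project", "example-url-map", rule).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("started operation:", op.Name)
}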
The default value is false.", // "location": "query", // "type": "boolean" // } @@ -151475,6 +159330,9 @@ type UrlMapsPatchCall struct { // Patch: Patches the specified UrlMap resource with the data included // in the request. This method supports PATCH semantics and uses the // JSON merge patch format and processing rules. +// +// - project: Project ID for this request. +// - urlMap: Name of the UrlMap resource to patch. // For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/patch func (r *UrlMapsService) Patch(project string, urlMap string, urlmap *UrlMap) *UrlMapsPatchCall { c := &UrlMapsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -151530,7 +159388,7 @@ func (c *UrlMapsPatchCall) Header() http.Header { func (c *UrlMapsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -151652,6 +159510,9 @@ type UrlMapsUpdateCall struct { // Update: Updates the specified UrlMap resource with the data included // in the request. +// +// - project: Project ID for this request. +// - urlMap: Name of the UrlMap resource to update. // For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/update func (r *UrlMapsService) Update(project string, urlMap string, urlmap *UrlMap) *UrlMapsUpdateCall { c := &UrlMapsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -151707,7 +159568,7 @@ func (c *UrlMapsUpdateCall) Header() http.Header { func (c *UrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -151830,6 +159691,9 @@ type UrlMapsValidateCall struct { // Validate: Runs static validation for the UrlMap. In particular, the // tests of the provided UrlMap will be run. Calling this method does // NOT create the UrlMap. +// +// - project: Project ID for this request. +// - urlMap: Name of the UrlMap resource to be validated as. // For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/validate func (r *UrlMapsService) Validate(project string, urlMap string, urlmapsvalidaterequest *UrlMapsValidateRequest) *UrlMapsValidateCall { c := &UrlMapsValidateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -151866,7 +159730,7 @@ func (c *UrlMapsValidateCall) Header() http.Header { func (c *UrlMapsValidateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -151981,6 +159845,8 @@ type VpnGatewaysAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of VPN gateways. +// +// - project: Project ID for this request. 
func (r *VpnGatewaysService) AggregatedList(project string) *VpnGatewaysAggregatedListCall { c := &VpnGatewaysAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -152068,7 +159934,7 @@ func (c *VpnGatewaysAggregatedListCall) PageToken(pageToken string) *VpnGateways // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *VpnGatewaysAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *VpnGatewaysAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -152111,7 +159977,7 @@ func (c *VpnGatewaysAggregatedListCall) Header() http.Header { func (c *VpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -152216,7 +160082,7 @@ func (c *VpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnGa // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -152268,6 +160134,10 @@ type VpnGatewaysDeleteCall struct { } // Delete: Deletes the specified VPN gateway. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. +// - vpnGateway: Name of the VPN gateway to delete. func (r *VpnGatewaysService) Delete(project string, region string, vpnGateway string) *VpnGatewaysDeleteCall { c := &VpnGatewaysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -152322,7 +160192,7 @@ func (c *VpnGatewaysDeleteCall) Header() http.Header { func (c *VpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -152446,6 +160316,10 @@ type VpnGatewaysGetCall struct { // Get: Returns the specified VPN gateway. Gets a list of available VPN // gateways by making a list() request. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. +// - vpnGateway: Name of the VPN gateway to return. 
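
The returnPartialSuccess flag that this revision re-documents on every list and aggregated-list call is a plain opt-in query parameter. As a hedged sketch (project name is a placeholder), a caller might combine it with the generated Pages helper like this:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Opt in to partial results so one failing scope does not fail the whole call.
	call := svc.VpnGateways.AggregatedList("my-project").ReturnPartialSuccess(true)
	// Pages follows nextPageToken automatically and invokes the callback once per page.
	err = call.Pages(ctx, func(page *compute.VpnGatewayAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, gw := range scoped.VpnGateways {
				fmt.Printf("%s: %s\n", scope, gw.Name)
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
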
func (r *VpnGatewaysService) Get(project string, region string, vpnGateway string) *VpnGatewaysGetCall { c := &VpnGatewaysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -152491,7 +160365,7 @@ func (c *VpnGatewaysGetCall) Header() http.Header { func (c *VpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -152613,6 +160487,10 @@ type VpnGatewaysGetStatusCall struct { } // GetStatus: Returns the status for the specified VPN gateway. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. +// - vpnGateway: Name of the VPN gateway to return. func (r *VpnGatewaysService) GetStatus(project string, region string, vpnGateway string) *VpnGatewaysGetStatusCall { c := &VpnGatewaysGetStatusCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -152658,7 +160536,7 @@ func (c *VpnGatewaysGetStatusCall) Header() http.Header { func (c *VpnGatewaysGetStatusCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -152780,6 +160658,9 @@ type VpnGatewaysInsertCall struct { // Insert: Creates a VPN gateway in the specified project and region // using the data included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. func (r *VpnGatewaysService) Insert(project string, region string, vpngateway *VpnGateway) *VpnGatewaysInsertCall { c := &VpnGatewaysInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -152834,7 +160715,7 @@ func (c *VpnGatewaysInsertCall) Header() http.Header { func (c *VpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -152956,6 +160837,9 @@ type VpnGatewaysListCall struct { // List: Retrieves a list of VPN gateways available to the specified // project and region. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. func (r *VpnGatewaysService) List(project string, region string) *VpnGatewaysListCall { c := &VpnGatewaysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -153031,7 +160915,7 @@ func (c *VpnGatewaysListCall) PageToken(pageToken string) *VpnGatewaysListCall { // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *VpnGatewaysListCall) ReturnPartialSuccess(returnPartialSuccess bool) *VpnGatewaysListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -153074,7 +160958,7 @@ func (c *VpnGatewaysListCall) Header() http.Header { func (c *VpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -153183,7 +161067,7 @@ func (c *VpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*VpnGatewayList, // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -153237,6 +161121,10 @@ type VpnGatewaysSetLabelsCall struct { // SetLabels: Sets the labels on a VpnGateway. To learn more about // labels, read the Labeling Resources documentation. +// +// - project: Project ID for this request. +// - region: The region for this request. +// - resource: Name or id of the resource for this request. func (r *VpnGatewaysService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *VpnGatewaysSetLabelsCall { c := &VpnGatewaysSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -153292,7 +161180,7 @@ func (c *VpnGatewaysSetLabelsCall) Header() http.Header { func (c *VpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -153424,6 +161312,10 @@ type VpnGatewaysTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. func (r *VpnGatewaysService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *VpnGatewaysTestIamPermissionsCall { c := &VpnGatewaysTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -153460,7 +161352,7 @@ func (c *VpnGatewaysTestIamPermissionsCall) Header() http.Header { func (c *VpnGatewaysTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -153585,6 +161477,8 @@ type VpnTunnelsAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of VPN tunnels. +// +// - project: Project ID for this request. 
func (r *VpnTunnelsService) AggregatedList(project string) *VpnTunnelsAggregatedListCall { c := &VpnTunnelsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -153672,7 +161566,7 @@ func (c *VpnTunnelsAggregatedListCall) PageToken(pageToken string) *VpnTunnelsAg // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *VpnTunnelsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *VpnTunnelsAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -153715,7 +161609,7 @@ func (c *VpnTunnelsAggregatedListCall) Header() http.Header { func (c *VpnTunnelsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -153820,7 +161714,7 @@ func (c *VpnTunnelsAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnTun // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -153872,6 +161766,10 @@ type VpnTunnelsDeleteCall struct { } // Delete: Deletes the specified VpnTunnel resource. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. +// - vpnTunnel: Name of the VpnTunnel resource to delete. func (r *VpnTunnelsService) Delete(project string, region string, vpnTunnel string) *VpnTunnelsDeleteCall { c := &VpnTunnelsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -153926,7 +161824,7 @@ func (c *VpnTunnelsDeleteCall) Header() http.Header { func (c *VpnTunnelsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -154050,6 +161948,10 @@ type VpnTunnelsGetCall struct { // Get: Returns the specified VpnTunnel resource. Gets a list of // available VPN tunnels by making a list() request. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. +// - vpnTunnel: Name of the VpnTunnel resource to return. 
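
The maxResults/pageToken/nextPageToken trio documented on these calls can also be driven by hand when the Pages helper is not wanted. A rough sketch over the VpnTunnels aggregated list, with a placeholder project and an arbitrary page size:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	token := ""
	for {
		resp, err := svc.VpnTunnels.AggregatedList("my-project").
			MaxResults(100).PageToken(token).Context(ctx).Do()
		if err != nil {
			log.Fatal(err)
		}
		for scope, scoped := range resp.Items {
			for _, t := range scoped.VpnTunnels {
				fmt.Printf("%s: %s (%s)\n", scope, t.Name, t.Status)
			}
		}
		// An empty nextPageToken means the last page has been reached.
		if resp.NextPageToken == "" {
			break
		}
		token = resp.NextPageToken
	}
}
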
func (r *VpnTunnelsService) Get(project string, region string, vpnTunnel string) *VpnTunnelsGetCall { c := &VpnTunnelsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -154095,7 +161997,7 @@ func (c *VpnTunnelsGetCall) Header() http.Header { func (c *VpnTunnelsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -154217,6 +162119,9 @@ type VpnTunnelsInsertCall struct { // Insert: Creates a VpnTunnel resource in the specified project and // region using the data included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. func (r *VpnTunnelsService) Insert(project string, region string, vpntunnel *VpnTunnel) *VpnTunnelsInsertCall { c := &VpnTunnelsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -154271,7 +162176,7 @@ func (c *VpnTunnelsInsertCall) Header() http.Header { func (c *VpnTunnelsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -154393,6 +162298,9 @@ type VpnTunnelsListCall struct { // List: Retrieves a list of VpnTunnel resources contained in the // specified project and region. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. func (r *VpnTunnelsService) List(project string, region string) *VpnTunnelsListCall { c := &VpnTunnelsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -154468,7 +162376,7 @@ func (c *VpnTunnelsListCall) PageToken(pageToken string) *VpnTunnelsListCall { // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *VpnTunnelsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *VpnTunnelsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -154511,7 +162419,7 @@ func (c *VpnTunnelsListCall) Header() http.Header { func (c *VpnTunnelsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -154620,7 +162528,7 @@ func (c *VpnTunnelsListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelList, e // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } @@ -154672,6 +162580,10 @@ type ZoneOperationsDeleteCall struct { } // Delete: Deletes the specified zone-specific Operations resource. 
+// +// - operation: Name of the Operations resource to delete. +// - project: Project ID for this request. +// - zone: Name of the zone for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/zoneOperations/delete func (r *ZoneOperationsService) Delete(project string, zone string, operation string) *ZoneOperationsDeleteCall { c := &ZoneOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -154708,7 +162620,7 @@ func (c *ZoneOperationsDeleteCall) Header() http.Header { func (c *ZoneOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -154798,6 +162710,10 @@ type ZoneOperationsGetCall struct { } // Get: Retrieves the specified zone-specific Operations resource. +// +// - operation: Name of the Operations resource to return. +// - project: Project ID for this request. +// - zone: Name of the zone for this request. // For details, see https://cloud.google.com/compute/docs/reference/latest/zoneOperations/get func (r *ZoneOperationsService) Get(project string, zone string, operation string) *ZoneOperationsGetCall { c := &ZoneOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -154844,7 +162760,7 @@ func (c *ZoneOperationsGetCall) Header() http.Header { func (c *ZoneOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -154966,6 +162882,9 @@ type ZoneOperationsListCall struct { // List: Retrieves a list of Operation resources contained within the // specified zone. +// +// - project: Project ID for this request. +// - zone: Name of the zone for request. // For details, see https://cloud.google.com/compute/docs/reference/latest/zoneOperations/list func (r *ZoneOperationsService) List(project string, zone string) *ZoneOperationsListCall { c := &ZoneOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -155042,7 +162961,7 @@ func (c *ZoneOperationsListCall) PageToken(pageToken string) *ZoneOperationsList // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. 
func (c *ZoneOperationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ZoneOperationsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -155085,7 +163004,7 @@ func (c *ZoneOperationsListCall) Header() http.Header { func (c *ZoneOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -155187,7 +163106,7 @@ func (c *ZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationLis // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // }, @@ -155259,6 +163178,10 @@ type ZoneOperationsWaitCall struct { // - If the default deadline is reached, there is no guarantee that the // operation is actually done when the method returns. Be prepared to // retry if the operation is not `DONE`. +// +// - operation: Name of the Operations resource to return. +// - project: Project ID for this request. +// - zone: Name of the zone for this request. func (r *ZoneOperationsService) Wait(project string, zone string, operation string) *ZoneOperationsWaitCall { c := &ZoneOperationsWaitCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -155294,7 +163217,7 @@ func (c *ZoneOperationsWaitCall) Header() http.Header { func (c *ZoneOperationsWaitCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -155413,6 +163336,9 @@ type ZonesGetCall struct { // Get: Returns the specified Zone resource. Gets a list of available // zones by making a list() request. +// +// - project: Project ID for this request. +// - zone: Name of the zone resource to return. // For details, see https://cloud.google.com/compute/docs/reference/latest/zones/get func (r *ZonesService) Get(project string, zone string) *ZonesGetCall { c := &ZonesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -155458,7 +163384,7 @@ func (c *ZonesGetCall) Header() http.Header { func (c *ZonesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -155570,6 +163496,8 @@ type ZonesListCall struct { // List: Retrieves the list of Zone resources available to the specified // project. +// +// - project: Project ID for this request. 
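
The Wait call documented above returns once the operation is DONE or roughly two minutes have passed, whichever comes first, so callers are expected to loop. A hedged sketch of that pattern, with placeholder project, zone, and operation names:

package main

import (
	"context"
	"log"
	"time"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	const project, zone, opName = "my-project", "us-east1-b", "operation-1234"
	for {
		op, err := svc.ZoneOperations.Wait(project, zone, opName).Context(ctx).Do()
		if err != nil {
			log.Fatal(err)
		}
		if op.Status == "DONE" {
			if op.Error != nil {
				log.Fatalf("operation failed: %+v", op.Error.Errors)
			}
			log.Print("operation complete")
			return
		}
		// Not DONE yet: the server-side deadline was reached, so poll again.
		time.Sleep(time.Second)
	}
}
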
// For details, see https://cloud.google.com/compute/docs/reference/latest/zones/list func (r *ZonesService) List(project string) *ZonesListCall { c := &ZonesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -155645,7 +163573,7 @@ func (c *ZonesListCall) PageToken(pageToken string) *ZonesListCall { // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is -// false and the logic is the same as today. +// false. func (c *ZonesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ZonesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c @@ -155688,7 +163616,7 @@ func (c *ZonesListCall) Header() http.Header { func (c *ZonesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -155788,7 +163716,7 @@ func (c *ZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) { // "type": "string" // }, // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } diff --git a/vendor/google.golang.org/api/dns/v1/dns-api.json b/vendor/google.golang.org/api/dns/v1/dns-api.json index e6f1e845e84..2c34cc64bdd 100644 --- a/vendor/google.golang.org/api/dns/v1/dns-api.json +++ b/vendor/google.golang.org/api/dns/v1/dns-api.json @@ -3,7 +3,7 @@ "oauth2": { "scopes": { "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" + "description": "See, edit, configure, and delete your Google Cloud Platform data" }, "https://www.googleapis.com/auth/cloud-platform.read-only": { "description": "View your data across Google Cloud Platform services" @@ -117,7 +117,7 @@ "changes": { "methods": { "create": { - "description": "Atomically update the ResourceRecordSet collection.", + "description": "Atomically updates the ResourceRecordSet collection.", "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/changes", "httpMethod": "POST", "id": "dns.changes.create", @@ -157,7 +157,7 @@ ] }, "get": { - "description": "Fetch the representation of an existing Change.", + "description": "Fetches the representation of an existing Change.", "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/changes/{changeId}", "httpMethod": "GET", "id": "dns.changes.get", @@ -203,7 +203,7 @@ ] }, "list": { - "description": "Enumerate Changes to a ResourceRecordSet collection.", + "description": "Enumerates Changes to a ResourceRecordSet collection.", "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/changes", "httpMethod": "GET", "id": "dns.changes.list", @@ -219,7 +219,7 @@ "type": "string" }, "maxResults": { - "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.", + "description": "Optional. Maximum number of results to be returned. 
If unspecified, the server decides how many results to return.", "format": "int32", "location": "query", "type": "integer" @@ -269,7 +269,7 @@ "dnsKeys": { "methods": { "get": { - "description": "Fetch the representation of an existing DnsKey.", + "description": "Fetches the representation of an existing DnsKey.", "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/dnsKeys/{dnsKeyId}", "httpMethod": "GET", "id": "dns.dnsKeys.get", @@ -285,7 +285,7 @@ "type": "string" }, "digestType": { - "description": "An optional comma-separated list of digest types to compute and display for key signing keys. If omitted, the recommended digest type will be computed and displayed.", + "description": "An optional comma-separated list of digest types to compute and display for key signing keys. If omitted, the recommended digest type is computed and displayed.", "location": "query", "type": "string" }, @@ -320,7 +320,7 @@ ] }, "list": { - "description": "Enumerate DnsKeys to a ResourceRecordSet collection.", + "description": "Enumerates DnsKeys to a ResourceRecordSet collection.", "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/dnsKeys", "httpMethod": "GET", "id": "dns.dnsKeys.list", @@ -330,7 +330,7 @@ ], "parameters": { "digestType": { - "description": "An optional comma-separated list of digest types to compute and display for key signing keys. If omitted, the recommended digest type will be computed and displayed.", + "description": "An optional comma-separated list of digest types to compute and display for key signing keys. If omitted, the recommended digest type is computed and displayed.", "location": "query", "type": "string" }, @@ -341,7 +341,7 @@ "type": "string" }, "maxResults": { - "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.", + "description": "Optional. Maximum number of results to be returned. If unspecified, the server decides how many results to return.", "format": "int32", "location": "query", "type": "integer" @@ -374,7 +374,7 @@ "managedZoneOperations": { "methods": { "get": { - "description": "Fetch the representation of an existing Operation.", + "description": "Fetches the representation of an existing Operation.", "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/operations/{operation}", "httpMethod": "GET", "id": "dns.managedZoneOperations.get", @@ -396,7 +396,7 @@ "type": "string" }, "operation": { - "description": "Identifies the operation addressed by this request.", + "description": "Identifies the operation addressed by this request (ID of the operation).", "location": "path", "required": true, "type": "string" @@ -420,7 +420,7 @@ ] }, "list": { - "description": "Enumerate Operations for the given ManagedZone.", + "description": "Enumerates Operations for the given ManagedZone.", "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/operations", "httpMethod": "GET", "id": "dns.managedZoneOperations.list", @@ -436,7 +436,7 @@ "type": "string" }, "maxResults": { - "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.", + "description": "Optional. Maximum number of results to be returned. 
If unspecified, the server decides how many results to return.", "format": "int32", "location": "query", "type": "integer" @@ -483,7 +483,7 @@ "managedZones": { "methods": { "create": { - "description": "Create a new ManagedZone.", + "description": "Creates a new ManagedZone.", "flatPath": "dns/v1/projects/{project}/managedZones", "httpMethod": "POST", "id": "dns.managedZones.create", @@ -516,7 +516,7 @@ ] }, "delete": { - "description": "Delete a previously created ManagedZone.", + "description": "Deletes a previously created ManagedZone.", "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}", "httpMethod": "DELETE", "id": "dns.managedZones.delete", @@ -550,7 +550,7 @@ ] }, "get": { - "description": "Fetch the representation of an existing ManagedZone.", + "description": "Fetches the representation of an existing ManagedZone.", "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}", "httpMethod": "GET", "id": "dns.managedZones.get", @@ -589,7 +589,7 @@ ] }, "list": { - "description": "Enumerate ManagedZones that have been created but not yet deleted.", + "description": "Enumerates ManagedZones that have been created but not yet deleted.", "flatPath": "dns/v1/projects/{project}/managedZones", "httpMethod": "GET", "id": "dns.managedZones.list", @@ -603,7 +603,7 @@ "type": "string" }, "maxResults": { - "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.", + "description": "Optional. Maximum number of results to be returned. If unspecified, the server decides how many results to return.", "format": "int32", "location": "query", "type": "integer" @@ -632,7 +632,7 @@ ] }, "patch": { - "description": "Apply a partial update to an existing ManagedZone.", + "description": "Applies a partial update to an existing ManagedZone.", "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}", "httpMethod": "PATCH", "id": "dns.managedZones.patch", @@ -672,7 +672,7 @@ ] }, "update": { - "description": "Update an existing ManagedZone.", + "description": "Updates an existing ManagedZone.", "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}", "httpMethod": "PUT", "id": "dns.managedZones.update", @@ -716,7 +716,7 @@ "policies": { "methods": { "create": { - "description": "Create a new Policy", + "description": "Creates a new Policy.", "flatPath": "dns/v1/projects/{project}/policies", "httpMethod": "POST", "id": "dns.policies.create", @@ -749,7 +749,7 @@ ] }, "delete": { - "description": "Delete a previously created Policy. Will fail if the policy is still being referenced by a network.", + "description": "Deletes a previously created Policy. Fails if the policy is still being referenced by a network.", "flatPath": "dns/v1/projects/{project}/policies/{policy}", "httpMethod": "DELETE", "id": "dns.policies.delete", @@ -783,7 +783,7 @@ ] }, "get": { - "description": "Fetch the representation of an existing Policy.", + "description": "Fetches the representation of an existing Policy.", "flatPath": "dns/v1/projects/{project}/policies/{policy}", "httpMethod": "GET", "id": "dns.policies.get", @@ -822,7 +822,7 @@ ] }, "list": { - "description": "Enumerate all Policies associated with a project.", + "description": "Enumerates all Policies associated with a project.", "flatPath": "dns/v1/projects/{project}/policies", "httpMethod": "GET", "id": "dns.policies.list", @@ -831,7 +831,7 @@ ], "parameters": { "maxResults": { - "description": "Optional. Maximum number of results to be returned. 
If unspecified, the server will decide how many results to return.", + "description": "Optional. Maximum number of results to be returned. If unspecified, the server decides how many results to return.", "format": "int32", "location": "query", "type": "integer" @@ -860,7 +860,7 @@ ] }, "patch": { - "description": "Apply a partial update to an existing Policy.", + "description": "Applies a partial update to an existing Policy.", "flatPath": "dns/v1/projects/{project}/policies/{policy}", "httpMethod": "PATCH", "id": "dns.policies.patch", @@ -900,7 +900,7 @@ ] }, "update": { - "description": "Update an existing Policy.", + "description": "Updates an existing Policy.", "flatPath": "dns/v1/projects/{project}/policies/{policy}", "httpMethod": "PUT", "id": "dns.policies.update", @@ -944,7 +944,7 @@ "projects": { "methods": { "get": { - "description": "Fetch the representation of an existing Project.", + "description": "Fetches the representation of an existing Project.", "flatPath": "dns/v1/projects/{project}", "httpMethod": "GET", "id": "dns.projects.get", @@ -975,12 +975,220 @@ "https://www.googleapis.com/auth/ndev.clouddns.readwrite" ] } + }, + "resources": { + "managedZones": { + "resources": { + "rrsets": { + "methods": { + "create": { + "description": "Creates a new ResourceRecordSet.", + "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets", + "httpMethod": "POST", + "id": "dns.projects.managedZones.rrsets.create", + "parameterOrder": [ + "project", + "managedZone" + ], + "parameters": { + "clientOperationId": { + "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + "location": "query", + "type": "string" + }, + "managedZone": { + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Identifies the project addressed by this request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets", + "request": { + "$ref": "ResourceRecordSet" + }, + "response": { + "$ref": "ResourceRecordSet" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, + "delete": { + "description": "Deletes a previously created ResourceRecordSet.", + "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}", + "httpMethod": "DELETE", + "id": "dns.projects.managedZones.rrsets.delete", + "parameterOrder": [ + "project", + "managedZone", + "name", + "type" + ], + "parameters": { + "clientOperationId": { + "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + "location": "query", + "type": "string" + }, + "managedZone": { + "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or ID.", + "location": "path", + "required": true, + "type": "string" + }, + "name": { + "description": "Fully qualified domain name.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Identifies the project addressed by this request.", + "location": "path", + "required": true, + "type": "string" + }, + "type": { + "description": "RRSet type.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}", + "response": { + "$ref": "ResourceRecordSetsDeleteResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, + "get": { + "description": "Fetches the representation of an existing ResourceRecordSet.", + "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}", + "httpMethod": "GET", + "id": "dns.projects.managedZones.rrsets.get", + "parameterOrder": [ + "project", + "managedZone", + "name", + "type" + ], + "parameters": { + "clientOperationId": { + "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + "location": "query", + "type": "string" + }, + "managedZone": { + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", + "location": "path", + "required": true, + "type": "string" + }, + "name": { + "description": "Fully qualified domain name.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Identifies the project addressed by this request.", + "location": "path", + "required": true, + "type": "string" + }, + "type": { + "description": "RRSet type.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}", + "response": { + "$ref": "ResourceRecordSet" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/ndev.clouddns.readonly", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, + "patch": { + "description": "Applies a partial update to an existing ResourceRecordSet.", + "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}", + "httpMethod": "PATCH", + "id": "dns.projects.managedZones.rrsets.patch", + "parameterOrder": [ + "project", + "managedZone", + "name", + "type" + ], + "parameters": { + "clientOperationId": { + "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + "location": "query", + "type": "string" + }, + "managedZone": { + "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or ID.", + "location": "path", + "required": true, + "type": "string" + }, + "name": { + "description": "Fully qualified domain name.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Identifies the project addressed by this request.", + "location": "path", + "required": true, + "type": "string" + }, + "type": { + "description": "RRSet type.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}", + "request": { + "$ref": "ResourceRecordSet" + }, + "response": { + "$ref": "ResourceRecordSet" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + } + } + } + } + } } }, "resourceRecordSets": { "methods": { "list": { - "description": "Enumerate ResourceRecordSets that have been created but not yet deleted.", + "description": "Enumerates ResourceRecordSets that you have created but not yet deleted.", "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets", "httpMethod": "GET", "id": "dns.resourceRecordSets.list", @@ -996,7 +1204,7 @@ "type": "string" }, "maxResults": { - "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.", + "description": "Optional. Maximum number of results to be returned. If unspecified, the server decides how many results to return.", "format": "int32", "location": "query", "type": "integer" @@ -1037,7 +1245,7 @@ } } }, - "revision": "20210204", + "revision": "20210319", "rootUrl": "https://dns.googleapis.com/", "schemas": { "Change": { @@ -1075,7 +1283,7 @@ "type": "string" }, "status": { - "description": "Status of the operation (output only). A status of \"done\" means that the request to update the authoritative servers has been sent but the servers might not be updated yet.", + "description": "Status of the operation (output only). A status of \"done\" means that the request to update the authoritative servers has been sent, but the servers might not be updated yet.", "enum": [ "pending", "done" @@ -1109,7 +1317,7 @@ "type": "string" }, "nextPageToken": { - "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token. In this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a \"snapshot\" of collections larger than the maximum page size.", + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token. This lets you retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned are an inconsistent view of the collection. 
You cannot retrieve a \"snapshot\" of collections larger than the maximum page size.", "type": "string" } }, @@ -1157,11 +1365,11 @@ "type": "string" }, "isActive": { - "description": "Active keys will be used to sign subsequent changes to the ManagedZone. Inactive keys will still be present as DNSKEY Resource Records for the use of resolvers validating existing signatures.", + "description": "Active keys are used to sign subsequent changes to the ManagedZone. Inactive keys are still present as DNSKEY Resource Records for the use of resolvers validating existing signatures.", "type": "boolean" }, "keyLength": { - "description": "Length of the key in bits. Specified at creation time then immutable.", + "description": "Length of the key in bits. Specified at creation time, and then immutable.", "format": "uint32", "type": "integer" }, @@ -1179,7 +1387,7 @@ "type": "string" }, "type": { - "description": "One of \"KEY_SIGNING\" or \"ZONE_SIGNING\". Keys of type KEY_SIGNING have the Secure Entry Point flag set and, when active, will be used to sign only resource record sets of type DNSKEY. Otherwise, the Secure Entry Point flag will be cleared and this key will be used to sign only resource record sets of other types. Immutable after creation time.", + "description": "One of \"KEY_SIGNING\" or \"ZONE_SIGNING\". Keys of type KEY_SIGNING have the Secure Entry Point flag set and, when active, are used to sign only resource record sets of type DNSKEY. Otherwise, the Secure Entry Point flag is cleared, and this key is used to sign only resource record sets of other types. Immutable after creation time.", "enum": [ "keySigning", "zoneSigning" @@ -1245,7 +1453,7 @@ "type": "integer" }, "keyType": { - "description": "Specifies whether this is a key signing key (KSK) or a zone signing key (ZSK). Key signing keys have the Secure Entry Point flag set and, when active, will only be used to sign resource record sets of type DNSKEY. Zone signing keys do not have the Secure Entry Point flag set and will be used to sign all other types of resource record sets.", + "description": "Specifies whether this is a key signing key (KSK) or a zone signing key (ZSK). Key signing keys have the Secure Entry Point flag set and, when active, are only used to sign resource record sets of type DNSKEY. Zone signing keys do not have the Secure Entry Point flag set and are used to sign all other types of resource record sets.", "enum": [ "keySigning", "zoneSigning" @@ -1283,7 +1491,7 @@ "type": "string" }, "nextPageToken": { - "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token. In this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a \"snapshot\" of collections larger than the maximum page size.", + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token. In this way you can retrieve the complete contents of even very large collections one page at a time. 
However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned are an inconsistent view of the collection. There is no way to retrieve a \"snapshot\" of collections larger than the maximum page size.", "type": "string" } }, @@ -1334,7 +1542,7 @@ "type": "string" }, "nameServerSet": { - "description": "Optionally specifies the NameServerSet for this ManagedZone. A NameServerSet is a set of DNS name servers that all host the same ManagedZones. Most users will leave this field unset.", + "description": "Optionally specifies the NameServerSet for this ManagedZone. A NameServerSet is a set of DNS name servers that all host the same ManagedZones. Most users leave this field unset. If you need to use this field, contact your account team.", "type": "string" }, "nameServers": { @@ -1354,11 +1562,11 @@ }, "reverseLookupConfig": { "$ref": "ManagedZoneReverseLookupConfig", - "description": "The presence of this field indicates that this is a managed reverse lookup zone and Cloud DNS will resolve reverse lookup queries using automatically configured records for VPC resources. This only applies to networks listed under private_visibility_config." + "description": "The presence of this field indicates that this is a managed reverse lookup zone and Cloud DNS resolves reverse lookup queries using automatically configured records for VPC resources. This only applies to networks listed under private_visibility_config." }, "serviceDirectoryConfig": { "$ref": "ManagedZoneServiceDirectoryConfig", - "description": "This field links to the associated service directory namespace. This field should not be set for public zones or forwarding zones." + "description": "This field links to the associated service directory namespace. Do not set this field for public zones or forwarding zones." }, "visibility": { "description": "The zone's visibility: public zones are exposed to the Internet, while private zones are visible only to Virtual Private Cloud resources.", @@ -1426,7 +1634,7 @@ "type": "string" }, "targetNameServers": { - "description": "List of target name servers to forward to. Cloud DNS will select the best available name server if more than one target is given.", + "description": "List of target name servers to forward to. Cloud DNS selects the best available name server if more than one target is given.", "items": { "$ref": "ManagedZoneForwardingConfigNameServerTarget" }, @@ -1439,14 +1647,14 @@ "id": "ManagedZoneForwardingConfigNameServerTarget", "properties": { "forwardingPath": { - "description": "Forwarding path for this NameServerTarget. If unset or set to DEFAULT, Cloud DNS will make forwarding decision based on address ranges, i.e. RFC1918 addresses go to the VPC, non-RFC1918 addresses go to the Internet. When set to PRIVATE, Cloud DNS will always send queries through VPC for this target.", + "description": "Forwarding path for this NameServerTarget. If unset or set to DEFAULT, Cloud DNS makes forwarding decisions based on IP address ranges; that is, RFC1918 addresses go to the VPC network, non-RFC1918 addresses go to the internet. When set to PRIVATE, Cloud DNS always sends queries through the VPC network for this target.", "enum": [ "default", "private" ], "enumDescriptions": [ - "Cloud DNS will make forwarding decision based on address ranges, i.e. 
RFC1918 addresses forward to the target through the VPC and non-RFC1918 addresses will forward to the target through the Internet", - "Cloud DNS will always forward to this target through the VPC." + "Cloud DNS makes forwarding decisions based on address ranges; that is, RFC1918 addresses forward to the target through the VPC and non-RFC1918 addresses forward to the target through the internet", + "Cloud DNS always forwards to this target through the VPC." ], "type": "string" }, @@ -1472,7 +1680,7 @@ "type": "string" }, "nextPageToken": { - "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your page token. In this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size.", + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your page token. This lets you retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned are an inconsistent view of the collection. You cannot retrieve a consistent snapshot of a collection larger than the maximum page size.", "type": "string" }, "operations": { @@ -1542,7 +1750,7 @@ "type": "string" }, "networkUrl": { - "description": "The fully qualified URL of the VPC network to bind to. This should be formatted like https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}", + "description": "The fully qualified URL of the VPC network to bind to. Format this URL like https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}", "type": "string" } }, @@ -1577,7 +1785,7 @@ "id": "ManagedZoneServiceDirectoryConfigNamespace", "properties": { "deletionTime": { - "description": "The time that the namespace backing this zone was deleted, empty string if it still exists. This is in RFC3339 text format. Output only.", + "description": "The time that the namespace backing this zone was deleted; an empty string if it still exists. This is in RFC3339 text format. Output only.", "type": "string" }, "kind": { @@ -1585,7 +1793,7 @@ "type": "string" }, "namespaceUrl": { - "description": "The fully qualified URL of the namespace associated with the zone. This should be formatted like https://servicedirectory.googleapis.com/v1/projects/{project}/locations/{location}/namespaces/{namespace}", + "description": "The fully qualified URL of the namespace associated with the zone. Format must be https://servicedirectory.googleapis.com/v1/projects/{project}/locations/{location}/namespaces/{namespace}", "type": "string" } }, @@ -1610,7 +1818,7 @@ "type": "array" }, "nextPageToken": { - "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your page token. 
In this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size.", + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your page token. This lets you the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned are an inconsistent view of the collection. You cannot retrieve a consistent snapshot of a collection larger than the maximum page size.", "type": "string" } }, @@ -1703,7 +1911,7 @@ "type": "string" }, "nextPageToken": { - "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your page token. In this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size.", + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your page token. This lets you the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned are an inconsistent view of the collection. You cannot retrieve a consistent snapshot of a collection larger than the maximum page size.", "type": "string" }, "policies": { @@ -1753,7 +1961,7 @@ "type": "string" }, "enableInboundForwarding": { - "description": "Allows networks bound to this policy to receive DNS queries sent by VMs or applications over VPN connections. When enabled, a virtual IP address will be allocated from each of the sub-networks that are bound to this policy.", + "description": "Allows networks bound to this policy to receive DNS queries sent by VMs or applications over VPN connections. When enabled, a virtual IP address is allocated from each of the subnetworks that are bound to this policy.", "type": "boolean" }, "enableLogging": { @@ -1770,7 +1978,7 @@ "type": "string" }, "name": { - "description": "User assigned name for this policy.", + "description": "User-assigned name for this policy.", "type": "string" }, "networks": { @@ -1804,14 +2012,14 @@ "id": "PolicyAlternativeNameServerConfigTargetNameServer", "properties": { "forwardingPath": { - "description": "Forwarding path for this TargetNameServer. If unset or set to DEFAULT, Cloud DNS will make forwarding decision based on address ranges, i.e. RFC1918 addresses go to the VPC, non-RFC1918 addresses go to the Internet. 
When set to PRIVATE, Cloud DNS will always send queries through VPC for this target.", + "description": "Forwarding path for this TargetNameServer. If unset or set to DEFAULT, Cloud DNS makes forwarding decisions based on address ranges; that is, RFC1918 addresses go to the VPC network, non-RFC1918 addresses go to the internet. When set to PRIVATE, Cloud DNS always sends queries through the VPC network for this target.", "enum": [ "default", "private" ], "enumDescriptions": [ - "Cloud DNS will make forwarding decision based on address ranges, i.e. RFC1918 addresses forward to the target through the VPC and non-RFC1918 addresses will forward to the target through the Internet", - "Cloud DNS will always forward to this target through the VPC." + "Cloud DNS makes forwarding decision based on IP address ranges; that is, RFC1918 addresses forward to the target through the VPC and non-RFC1918 addresses forward to the target through the internet", + "Cloud DNS always forwards to this target through the VPC." ], "type": "string" }, @@ -1841,7 +2049,7 @@ "type": "object" }, "Project": { - "description": "A project resource. The project is a top level container for resources including Cloud DNS ManagedZones. Projects can be created only in the APIs console.", + "description": "A project resource. The project is a top level container for resources including Cloud DNS ManagedZones. Projects can be created only in the APIs console. Next tag: 7.", "id": "Project", "properties": { "id": { @@ -1948,7 +2156,7 @@ "type": "object" }, "ResourceRecordSet": { - "description": "A unit of data that will be returned by the DNS servers.", + "description": "A unit of data that is returned by the DNS servers.", "id": "ResourceRecordSet", "properties": { "kind": { @@ -1985,6 +2193,11 @@ }, "type": "object" }, + "ResourceRecordSetsDeleteResponse": { + "id": "ResourceRecordSetsDeleteResponse", + "properties": {}, + "type": "object" + }, "ResourceRecordSetsListResponse": { "id": "ResourceRecordSetsListResponse", "properties": { @@ -1997,7 +2210,7 @@ "type": "string" }, "nextPageToken": { - "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token. In this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size.", + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token. This lets you retrieve complete contents of even larger collections, one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of elements returned are an inconsistent view of the collection. 
You cannot retrieve a consistent snapshot of a collection larger than the maximum page size.", "type": "string" }, "rrsets": { diff --git a/vendor/google.golang.org/api/dns/v1/dns-gen.go b/vendor/google.golang.org/api/dns/v1/dns-gen.go index 0d02bdafc3a..3f6ab22211f 100644 --- a/vendor/google.golang.org/api/dns/v1/dns-gen.go +++ b/vendor/google.golang.org/api/dns/v1/dns-gen.go @@ -83,7 +83,7 @@ const mtlsBasePath = "https://dns.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( - // View and manage your data across Google Cloud Platform services + // See, edit, configure, and delete your Google Cloud Platform data CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" // View your data across Google Cloud Platform services @@ -216,11 +216,35 @@ type PoliciesService struct { func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} + rs.ManagedZones = NewProjectsManagedZonesService(s) return rs } type ProjectsService struct { s *Service + + ManagedZones *ProjectsManagedZonesService +} + +func NewProjectsManagedZonesService(s *Service) *ProjectsManagedZonesService { + rs := &ProjectsManagedZonesService{s: s} + rs.Rrsets = NewProjectsManagedZonesRrsetsService(s) + return rs +} + +type ProjectsManagedZonesService struct { + s *Service + + Rrsets *ProjectsManagedZonesRrsetsService +} + +func NewProjectsManagedZonesRrsetsService(s *Service) *ProjectsManagedZonesRrsetsService { + rs := &ProjectsManagedZonesRrsetsService{s: s} + return rs +} + +type ProjectsManagedZonesRrsetsService struct { + s *Service } func NewResourceRecordSetsService(s *Service) *ResourceRecordSetsService { @@ -262,7 +286,7 @@ type Change struct { // Status: Status of the operation (output only). A status of "done" // means that the request to update the authoritative servers has been - // sent but the servers might not be updated yet. + // sent, but the servers might not be updated yet. // // Possible values: // "pending" @@ -310,12 +334,12 @@ type ChangesListResponse struct { // NextPageToken: The presence of this field indicates that there exist // more results following your last page of results in pagination order. // To fetch them, make another list request using this value as your - // pagination token. In this way you can retrieve the complete contents - // of even very large collections one page at a time. However, if the + // pagination token. This lets you retrieve the complete contents of + // even very large collections one page at a time. However, if the // contents of the collection change between the first and last - // paginated list request, the set of all elements returned will be an - // inconsistent view of the collection. There is no way to retrieve a - // "snapshot" of collections larger than the maximum page size. + // paginated list request, the set of all elements returned are an + // inconsistent view of the collection. You cannot retrieve a "snapshot" + // of collections larger than the maximum page size. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -376,13 +400,13 @@ type DnsKey struct { // only). Id string `json:"id,omitempty"` - // IsActive: Active keys will be used to sign subsequent changes to the - // ManagedZone. Inactive keys will still be present as DNSKEY Resource + // IsActive: Active keys are used to sign subsequent changes to the + // ManagedZone. 
Inactive keys are still present as DNSKEY Resource // Records for the use of resolvers validating existing signatures. IsActive bool `json:"isActive,omitempty"` - // KeyLength: Length of the key in bits. Specified at creation time then - // immutable. + // KeyLength: Length of the key in bits. Specified at creation time, and + // then immutable. KeyLength int64 `json:"keyLength,omitempty"` // KeyTag: The key tag is a non-cryptographic hash of the a DNSKEY @@ -401,10 +425,10 @@ type DnsKey struct { // Type: One of "KEY_SIGNING" or "ZONE_SIGNING". Keys of type // KEY_SIGNING have the Secure Entry Point flag set and, when active, - // will be used to sign only resource record sets of type DNSKEY. - // Otherwise, the Secure Entry Point flag will be cleared and this key - // will be used to sign only resource record sets of other types. - // Immutable after creation time. + // are used to sign only resource record sets of type DNSKEY. Otherwise, + // the Secure Entry Point flag is cleared, and this key is used to sign + // only resource record sets of other types. Immutable after creation + // time. // // Possible values: // "keySigning" @@ -494,9 +518,9 @@ type DnsKeySpec struct { // KeyType: Specifies whether this is a key signing key (KSK) or a zone // signing key (ZSK). Key signing keys have the Secure Entry Point flag - // set and, when active, will only be used to sign resource record sets - // of type DNSKEY. Zone signing keys do not have the Secure Entry Point - // flag set and will be used to sign all other types of resource record + // set and, when active, are only used to sign resource record sets of + // type DNSKEY. Zone signing keys do not have the Secure Entry Point + // flag set and are used to sign all other types of resource record // sets. // // Possible values: @@ -546,7 +570,7 @@ type DnsKeysListResponse struct { // pagination token. In this way you can retrieve the complete contents // of even very large collections one page at a time. However, if the // contents of the collection change between the first and last - // paginated list request, the set of all elements returned will be an + // paginated list request, the set of all elements returned are an // inconsistent view of the collection. There is no way to retrieve a // "snapshot" of collections larger than the maximum page size. NextPageToken string `json:"nextPageToken,omitempty"` @@ -620,7 +644,8 @@ type ManagedZone struct { // NameServerSet: Optionally specifies the NameServerSet for this // ManagedZone. A NameServerSet is a set of DNS name servers that all - // host the same ManagedZones. Most users will leave this field unset. + // host the same ManagedZones. Most users leave this field unset. If you + // need to use this field, contact your account team. NameServerSet string `json:"nameServerSet,omitempty"` // NameServers: Delegate your managed_zone to these virtual name @@ -637,14 +662,14 @@ type ManagedZone struct { PrivateVisibilityConfig *ManagedZonePrivateVisibilityConfig `json:"privateVisibilityConfig,omitempty"` // ReverseLookupConfig: The presence of this field indicates that this - // is a managed reverse lookup zone and Cloud DNS will resolve reverse + // is a managed reverse lookup zone and Cloud DNS resolves reverse // lookup queries using automatically configured records for VPC // resources. This only applies to networks listed under // private_visibility_config. 
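The hunks above touch most of the ManagedZone sub-configurations (private visibility, forwarding targets, reverse lookup, Service Directory). As an illustrative aside, and not part of the vendored diff, the following sketch shows how those generated types compose when creating a private forwarding zone. Every identifier (project, network URL, zone name, the 10.0.0.53 target) is a placeholder, and the sketch assumes Application Default Credentials are available to dns.NewService.

package main

import (
	"context"
	"fmt"
	"log"

	dns "google.golang.org/api/dns/v1"
)

// createPrivateForwardingZone is an illustrative sketch only: it builds a
// private zone visible to a single VPC network that forwards queries to an
// on-premises name server through the VPC. Every identifier is a placeholder.
func createPrivateForwardingZone(project string) error {
	ctx := context.Background()
	srv, err := dns.NewService(ctx) // assumes Application Default Credentials
	if err != nil {
		return err
	}
	zone := &dns.ManagedZone{
		Name:        "corp-example",
		DnsName:     "corp.example.com.",
		Description: "private forwarding zone (example)",
		Visibility:  "private",
		PrivateVisibilityConfig: &dns.ManagedZonePrivateVisibilityConfig{
			Networks: []*dns.ManagedZonePrivateVisibilityConfigNetwork{{
				NetworkUrl: "https://www.googleapis.com/compute/v1/projects/" + project + "/global/networks/default",
			}},
		},
		ForwardingConfig: &dns.ManagedZoneForwardingConfig{
			TargetNameServers: []*dns.ManagedZoneForwardingConfigNameServerTarget{{
				Ipv4Address:    "10.0.0.53",
				ForwardingPath: "private", // always route to this target through the VPC
			}},
		},
	}
	created, err := srv.ManagedZones.Create(project, zone).Do()
	if err != nil {
		return err
	}
	fmt.Println("created zone:", created.Name)
	return nil
}

func main() {
	if err := createPrivateForwardingZone("my-project"); err != nil { // placeholder project ID
		log.Fatal(err)
	}
}

Setting ForwardingPath to "private" corresponds to the enum value documented in these hunks: queries to that target always travel through the VPC rather than the internet.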
ReverseLookupConfig *ManagedZoneReverseLookupConfig `json:"reverseLookupConfig,omitempty"` // ServiceDirectoryConfig: This field links to the associated service - // directory namespace. This field should not be set for public zones or + // directory namespace. Do not set this field for public zones or // forwarding zones. ServiceDirectoryConfig *ManagedZoneServiceDirectoryConfig `json:"serviceDirectoryConfig,omitempty"` @@ -736,8 +761,8 @@ type ManagedZoneForwardingConfig struct { Kind string `json:"kind,omitempty"` // TargetNameServers: List of target name servers to forward to. Cloud - // DNS will select the best available name server if more than one - // target is given. + // DNS selects the best available name server if more than one target is + // given. TargetNameServers []*ManagedZoneForwardingConfigNameServerTarget `json:"targetNameServers,omitempty"` // ForceSendFields is a list of field names (e.g. "Kind") to @@ -765,18 +790,18 @@ func (s *ManagedZoneForwardingConfig) MarshalJSON() ([]byte, error) { type ManagedZoneForwardingConfigNameServerTarget struct { // ForwardingPath: Forwarding path for this NameServerTarget. If unset - // or set to DEFAULT, Cloud DNS will make forwarding decision based on - // address ranges, i.e. RFC1918 addresses go to the VPC, non-RFC1918 - // addresses go to the Internet. When set to PRIVATE, Cloud DNS will - // always send queries through VPC for this target. + // or set to DEFAULT, Cloud DNS makes forwarding decisions based on IP + // address ranges; that is, RFC1918 addresses go to the VPC network, + // non-RFC1918 addresses go to the internet. When set to PRIVATE, Cloud + // DNS always sends queries through the VPC network for this target. // // Possible values: - // "default" - Cloud DNS will make forwarding decision based on - // address ranges, i.e. RFC1918 addresses forward to the target through - // the VPC and non-RFC1918 addresses will forward to the target through - // the Internet - // "private" - Cloud DNS will always forward to this target through - // the VPC. + // "default" - Cloud DNS makes forwarding decisions based on address + // ranges; that is, RFC1918 addresses forward to the target through the + // VPC and non-RFC1918 addresses forward to the target through the + // internet + // "private" - Cloud DNS always forwards to this target through the + // VPC. ForwardingPath string `json:"forwardingPath,omitempty"` // Ipv4Address: IPv4 address of a target name server. @@ -816,13 +841,12 @@ type ManagedZoneOperationsListResponse struct { // NextPageToken: The presence of this field indicates that there exist // more results following your last page of results in pagination order. // To fetch them, make another list request using this value as your - // page token. In this way you can retrieve the complete contents of - // even very large collections one page at a time. However, if the - // contents of the collection change between the first and last - // paginated list request, the set of all elements returned will be an - // inconsistent view of the collection. There is no way to retrieve a - // consistent snapshot of a collection larger than the maximum page - // size. + // page token. This lets you retrieve the complete contents of even very + // large collections one page at a time. However, if the contents of the + // collection change between the first and last paginated list request, + // the set of all elements returned are an inconsistent view of the + // collection. 
You cannot retrieve a consistent snapshot of a collection + // larger than the maximum page size. NextPageToken string `json:"nextPageToken,omitempty"` // Operations: The operation resources. @@ -956,7 +980,7 @@ type ManagedZonePrivateVisibilityConfigNetwork struct { Kind string `json:"kind,omitempty"` // NetworkUrl: The fully qualified URL of the VPC network to bind to. - // This should be formatted like + // Format this URL like // https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network} NetworkUrl string `json:"networkUrl,omitempty"` @@ -1043,14 +1067,14 @@ func (s *ManagedZoneServiceDirectoryConfig) MarshalJSON() ([]byte, error) { type ManagedZoneServiceDirectoryConfigNamespace struct { // DeletionTime: The time that the namespace backing this zone was - // deleted, empty string if it still exists. This is in RFC3339 text + // deleted; an empty string if it still exists. This is in RFC3339 text // format. Output only. DeletionTime string `json:"deletionTime,omitempty"` Kind string `json:"kind,omitempty"` // NamespaceUrl: The fully qualified URL of the namespace associated - // with the zone. This should be formatted like + // with the zone. Format must be // https://servicedirectory.googleapis.com/v1/projects/{project}/locations/{location}/namespaces/{namespace} NamespaceUrl string `json:"namespaceUrl,omitempty"` @@ -1089,13 +1113,12 @@ type ManagedZonesListResponse struct { // NextPageToken: The presence of this field indicates that there exist // more results following your last page of results in pagination order. // To fetch them, make another list request using this value as your - // page token. In this way you can retrieve the complete contents of - // even very large collections one page at a time. However, if the - // contents of the collection change between the first and last - // paginated list request, the set of all elements returned will be an - // inconsistent view of the collection. There is no way to retrieve a - // consistent snapshot of a collection larger than the maximum page - // size. + // page token. This lets you the complete contents of even very large + // collections one page at a time. However, if the contents of the + // collection change between the first and last paginated list request, + // the set of all elements returned are an inconsistent view of the + // collection. You cannot retrieve a consistent snapshot of a collection + // larger than the maximum page size. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1267,13 +1290,12 @@ type PoliciesListResponse struct { // NextPageToken: The presence of this field indicates that there exist // more results following your last page of results in pagination order. // To fetch them, make another list request using this value as your - // page token. In this way you can retrieve the complete contents of - // even very large collections one page at a time. However, if the - // contents of the collection change between the first and last - // paginated list request, the set of all elements returned will be an - // inconsistent view of the collection. There is no way to retrieve a - // consistent snapshot of a collection larger than the maximum page - // size. + // page token. This lets you the complete contents of even very large + // collections one page at a time. 
However, if the contents of the + // collection change between the first and last paginated list request, + // the set of all elements returned are an inconsistent view of the + // collection. You cannot retrieve a consistent snapshot of a collection + // larger than the maximum page size. NextPageToken string `json:"nextPageToken,omitempty"` // Policies: The policy resources. @@ -1386,8 +1408,8 @@ type Policy struct { // EnableInboundForwarding: Allows networks bound to this policy to // receive DNS queries sent by VMs or applications over VPN connections. - // When enabled, a virtual IP address will be allocated from each of the - // sub-networks that are bound to this policy. + // When enabled, a virtual IP address is allocated from each of the + // subnetworks that are bound to this policy. EnableInboundForwarding bool `json:"enableInboundForwarding,omitempty"` // EnableLogging: Controls whether logging is enabled for the networks @@ -1400,7 +1422,7 @@ type Policy struct { Kind string `json:"kind,omitempty"` - // Name: User assigned name for this policy. + // Name: User-assigned name for this policy. Name string `json:"name,omitempty"` // Networks: List of network names specifying networks to which this @@ -1470,18 +1492,18 @@ func (s *PolicyAlternativeNameServerConfig) MarshalJSON() ([]byte, error) { type PolicyAlternativeNameServerConfigTargetNameServer struct { // ForwardingPath: Forwarding path for this TargetNameServer. If unset - // or set to DEFAULT, Cloud DNS will make forwarding decision based on - // address ranges, i.e. RFC1918 addresses go to the VPC, non-RFC1918 - // addresses go to the Internet. When set to PRIVATE, Cloud DNS will - // always send queries through VPC for this target. + // or set to DEFAULT, Cloud DNS makes forwarding decisions based on + // address ranges; that is, RFC1918 addresses go to the VPC network, + // non-RFC1918 addresses go to the internet. When set to PRIVATE, Cloud + // DNS always sends queries through the VPC network for this target. // // Possible values: - // "default" - Cloud DNS will make forwarding decision based on - // address ranges, i.e. RFC1918 addresses forward to the target through - // the VPC and non-RFC1918 addresses will forward to the target through - // the Internet - // "private" - Cloud DNS will always forward to this target through - // the VPC. + // "default" - Cloud DNS makes forwarding decision based on IP address + // ranges; that is, RFC1918 addresses forward to the target through the + // VPC and non-RFC1918 addresses forward to the target through the + // internet + // "private" - Cloud DNS always forwards to this target through the + // VPC. ForwardingPath string `json:"forwardingPath,omitempty"` // Ipv4Address: IPv4 address to forward to. @@ -1546,7 +1568,7 @@ func (s *PolicyNetwork) MarshalJSON() ([]byte, error) { // Project: A project resource. The project is a top level container for // resources including Cloud DNS ManagedZones. Projects can be created -// only in the APIs console. +// only in the APIs console. Next tag: 7. type Project struct { // Id: User assigned unique identifier for the resource (output only). Id string `json:"id,omitempty"` @@ -1669,7 +1691,7 @@ func (s *Quota) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ResourceRecordSet: A unit of data that will be returned by the DNS +// ResourceRecordSet: A unit of data that is returned by the DNS // servers. 
type ResourceRecordSet struct { Kind string `json:"kind,omitempty"` @@ -1692,6 +1714,10 @@ type ResourceRecordSet struct { // Supported DNS record types. Type string `json:"type,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Kind") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -1715,6 +1741,12 @@ func (s *ResourceRecordSet) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type ResourceRecordSetsDeleteResponse struct { + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` +} + type ResourceRecordSetsListResponse struct { Header *ResponseHeader `json:"header,omitempty"` @@ -1724,13 +1756,12 @@ type ResourceRecordSetsListResponse struct { // NextPageToken: The presence of this field indicates that there exist // more results following your last page of results in pagination order. // To fetch them, make another list request using this value as your - // pagination token. In this way you can retrieve the complete contents - // of even very large collections one page at a time. However, if the - // contents of the collection change between the first and last - // paginated list request, the set of all elements returned will be an - // inconsistent view of the collection. There is no way to retrieve a - // consistent snapshot of a collection larger than the maximum page - // size. + // pagination token. This lets you retrieve complete contents of even + // larger collections, one page at a time. However, if the contents of + // the collection change between the first and last paginated list + // request, the set of elements returned are an inconsistent view of the + // collection. You cannot retrieve a consistent snapshot of a collection + // larger than the maximum page size. NextPageToken string `json:"nextPageToken,omitempty"` // Rrsets: The resource record set resources. @@ -1805,7 +1836,11 @@ type ChangesCreateCall struct { header_ http.Header } -// Create: Atomically update the ResourceRecordSet collection. +// Create: Atomically updates the ResourceRecordSet collection. +// +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. 
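For the Changes.Create method documented here ("Atomically updates the ResourceRecordSet collection"), a minimal hedged sketch follows, reusing the package, imports, and srv *dns.Service setup from the earlier sketch. The record name and address are placeholders, not values from this diff.

// addARecord submits a single A record as an atomic Change: all additions
// and deletions in the Change apply together or not at all.
func addARecord(srv *dns.Service, project, zone string) (*dns.Change, error) {
	change := &dns.Change{
		Additions: []*dns.ResourceRecordSet{{
			Name:    "www.corp.example.com.", // placeholder FQDN; must end with a dot
			Type:    "A",
			Ttl:     300,
			Rrdatas: []string{"203.0.113.10"},
		}},
	}
	return srv.Changes.Create(project, zone, change).Do()
}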
func (r *ChangesService) Create(project string, managedZone string, change *Change) *ChangesCreateCall { c := &ChangesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -1850,7 +1885,7 @@ func (c *ChangesCreateCall) Header() http.Header { func (c *ChangesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -1915,7 +1950,7 @@ func (c *ChangesCreateCall) Do(opts ...googleapi.CallOption) (*Change, error) { } return ret, nil // { - // "description": "Atomically update the ResourceRecordSet collection.", + // "description": "Atomically updates the ResourceRecordSet collection.", // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/changes", // "httpMethod": "POST", // "id": "dns.changes.create", @@ -1970,7 +2005,13 @@ type ChangesGetCall struct { header_ http.Header } -// Get: Fetch the representation of an existing Change. +// Get: Fetches the representation of an existing Change. +// +// - changeId: The identifier of the requested change, from a previous +// ResourceRecordSetsChangeResponse. +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. func (r *ChangesService) Get(project string, managedZone string, changeId string) *ChangesGetCall { c := &ChangesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -2025,7 +2066,7 @@ func (c *ChangesGetCall) Header() http.Header { func (c *ChangesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2089,7 +2130,7 @@ func (c *ChangesGetCall) Do(opts ...googleapi.CallOption) (*Change, error) { } return ret, nil // { - // "description": "Fetch the representation of an existing Change.", + // "description": "Fetches the representation of an existing Change.", // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/changes/{changeId}", // "httpMethod": "GET", // "id": "dns.changes.get", @@ -2149,7 +2190,11 @@ type ChangesListCall struct { header_ http.Header } -// List: Enumerate Changes to a ResourceRecordSet collection. +// List: Enumerates Changes to a ResourceRecordSet collection. +// +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. func (r *ChangesService) List(project string, managedZone string) *ChangesListCall { c := &ChangesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -2158,7 +2203,7 @@ func (r *ChangesService) List(project string, managedZone string) *ChangesListCa } // MaxResults sets the optional parameter "maxResults": Maximum number -// of results to be returned. If unspecified, the server will decide how +// of results to be returned. If unspecified, the server decides how // many results to return. 
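The nextPageToken and maxResults wording rewritten throughout this diff describes ordinary page-token pagination. Below is a hedged sketch of the corresponding client-side loop; it assumes the generated ChangesListCall exposes a PageToken setter (standard for list calls that take a pageToken query parameter, though not shown in these hunks) and reuses srv and the imports from the first sketch.

// listAllChanges walks every page of Changes for a zone until NextPageToken
// is empty, which signals the final page.
func listAllChanges(srv *dns.Service, project, zone string) error {
	pageToken := ""
	for {
		call := srv.Changes.List(project, zone).MaxResults(100)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		resp, err := call.Do()
		if err != nil {
			return err
		}
		for _, ch := range resp.Changes {
			fmt.Printf("change %s: %s\n", ch.Id, ch.Status)
		}
		if resp.NextPageToken == "" {
			return nil
		}
		pageToken = resp.NextPageToken
	}
}

As the descriptions note, pages fetched this way are not a consistent snapshot if the collection changes between requests.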
func (c *ChangesListCall) MaxResults(maxResults int64) *ChangesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) @@ -2227,7 +2272,7 @@ func (c *ChangesListCall) Header() http.Header { func (c *ChangesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2290,7 +2335,7 @@ func (c *ChangesListCall) Do(opts ...googleapi.CallOption) (*ChangesListResponse } return ret, nil // { - // "description": "Enumerate Changes to a ResourceRecordSet collection.", + // "description": "Enumerates Changes to a ResourceRecordSet collection.", // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/changes", // "httpMethod": "GET", // "id": "dns.changes.list", @@ -2306,7 +2351,7 @@ func (c *ChangesListCall) Do(opts ...googleapi.CallOption) (*ChangesListResponse // "type": "string" // }, // "maxResults": { - // "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.", + // "description": "Optional. Maximum number of results to be returned. If unspecified, the server decides how many results to return.", // "format": "int32", // "location": "query", // "type": "integer" @@ -2388,7 +2433,12 @@ type DnsKeysGetCall struct { header_ http.Header } -// Get: Fetch the representation of an existing DnsKey. +// Get: Fetches the representation of an existing DnsKey. +// +// - dnsKeyId: The identifier of the requested DnsKey. +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. func (r *DnsKeysService) Get(project string, managedZone string, dnsKeyId string) *DnsKeysGetCall { c := &DnsKeysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -2408,8 +2458,8 @@ func (c *DnsKeysGetCall) ClientOperationId(clientOperationId string) *DnsKeysGet // DigestType sets the optional parameter "digestType": An optional // comma-separated list of digest types to compute and display for key -// signing keys. If omitted, the recommended digest type will be -// computed and displayed. +// signing keys. If omitted, the recommended digest type is computed and +// displayed. 
func (c *DnsKeysGetCall) DigestType(digestType string) *DnsKeysGetCall { c.urlParams_.Set("digestType", digestType) return c @@ -2452,7 +2502,7 @@ func (c *DnsKeysGetCall) Header() http.Header { func (c *DnsKeysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2516,7 +2566,7 @@ func (c *DnsKeysGetCall) Do(opts ...googleapi.CallOption) (*DnsKey, error) { } return ret, nil // { - // "description": "Fetch the representation of an existing DnsKey.", + // "description": "Fetches the representation of an existing DnsKey.", // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/dnsKeys/{dnsKeyId}", // "httpMethod": "GET", // "id": "dns.dnsKeys.get", @@ -2532,7 +2582,7 @@ func (c *DnsKeysGetCall) Do(opts ...googleapi.CallOption) (*DnsKey, error) { // "type": "string" // }, // "digestType": { - // "description": "An optional comma-separated list of digest types to compute and display for key signing keys. If omitted, the recommended digest type will be computed and displayed.", + // "description": "An optional comma-separated list of digest types to compute and display for key signing keys. If omitted, the recommended digest type is computed and displayed.", // "location": "query", // "type": "string" // }, @@ -2581,7 +2631,11 @@ type DnsKeysListCall struct { header_ http.Header } -// List: Enumerate DnsKeys to a ResourceRecordSet collection. +// List: Enumerates DnsKeys to a ResourceRecordSet collection. +// +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. func (r *DnsKeysService) List(project string, managedZone string) *DnsKeysListCall { c := &DnsKeysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -2591,15 +2645,15 @@ func (r *DnsKeysService) List(project string, managedZone string) *DnsKeysListCa // DigestType sets the optional parameter "digestType": An optional // comma-separated list of digest types to compute and display for key -// signing keys. If omitted, the recommended digest type will be -// computed and displayed. +// signing keys. If omitted, the recommended digest type is computed and +// displayed. func (c *DnsKeysListCall) DigestType(digestType string) *DnsKeysListCall { c.urlParams_.Set("digestType", digestType) return c } // MaxResults sets the optional parameter "maxResults": Maximum number -// of results to be returned. If unspecified, the server will decide how +// of results to be returned. If unspecified, the server decides how // many results to return. 
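To illustrate the DnsKeys.List call and its optional digestType parameter shown above, here is a small sketch reusing srv and the imports from the first example. "sha256" is an assumed digest-type name, not a value taken from this diff; if the parameter is omitted, the recommended digest type is computed and displayed, per the description above.

// printZoneKeys lists the DNSSEC keys for a zone and prints their type,
// identifier, key tag, and whether they are active.
func printZoneKeys(srv *dns.Service, project, zone string) error {
	resp, err := srv.DnsKeys.List(project, zone).DigestType("sha256").Do()
	if err != nil {
		return err
	}
	for _, k := range resp.DnsKeys {
		fmt.Printf("%s key %s (tag %d) active=%v\n", k.Type, k.Id, k.KeyTag, k.IsActive)
	}
	return nil
}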
func (c *DnsKeysListCall) MaxResults(maxResults int64) *DnsKeysListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) @@ -2651,7 +2705,7 @@ func (c *DnsKeysListCall) Header() http.Header { func (c *DnsKeysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2714,7 +2768,7 @@ func (c *DnsKeysListCall) Do(opts ...googleapi.CallOption) (*DnsKeysListResponse } return ret, nil // { - // "description": "Enumerate DnsKeys to a ResourceRecordSet collection.", + // "description": "Enumerates DnsKeys to a ResourceRecordSet collection.", // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/dnsKeys", // "httpMethod": "GET", // "id": "dns.dnsKeys.list", @@ -2724,7 +2778,7 @@ func (c *DnsKeysListCall) Do(opts ...googleapi.CallOption) (*DnsKeysListResponse // ], // "parameters": { // "digestType": { - // "description": "An optional comma-separated list of digest types to compute and display for key signing keys. If omitted, the recommended digest type will be computed and displayed.", + // "description": "An optional comma-separated list of digest types to compute and display for key signing keys. If omitted, the recommended digest type is computed and displayed.", // "location": "query", // "type": "string" // }, @@ -2735,7 +2789,7 @@ func (c *DnsKeysListCall) Do(opts ...googleapi.CallOption) (*DnsKeysListResponse // "type": "string" // }, // "maxResults": { - // "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.", + // "description": "Optional. Maximum number of results to be returned. If unspecified, the server decides how many results to return.", // "format": "int32", // "location": "query", // "type": "integer" @@ -2800,7 +2854,12 @@ type ManagedZoneOperationsGetCall struct { header_ http.Header } -// Get: Fetch the representation of an existing Operation. +// Get: Fetches the representation of an existing Operation. +// +// - managedZone: Identifies the managed zone addressed by this request. +// - operation: Identifies the operation addressed by this request (ID +// of the operation). +// - project: Identifies the project addressed by this request. 
func (r *ManagedZoneOperationsService) Get(project string, managedZone string, operation string) *ManagedZoneOperationsGetCall { c := &ManagedZoneOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -2855,7 +2914,7 @@ func (c *ManagedZoneOperationsGetCall) Header() http.Header { func (c *ManagedZoneOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2919,7 +2978,7 @@ func (c *ManagedZoneOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Fetch the representation of an existing Operation.", + // "description": "Fetches the representation of an existing Operation.", // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/operations/{operation}", // "httpMethod": "GET", // "id": "dns.managedZoneOperations.get", @@ -2941,7 +3000,7 @@ func (c *ManagedZoneOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operat // "type": "string" // }, // "operation": { - // "description": "Identifies the operation addressed by this request.", + // "description": "Identifies the operation addressed by this request (ID of the operation).", // "location": "path", // "required": true, // "type": "string" @@ -2979,7 +3038,10 @@ type ManagedZoneOperationsListCall struct { header_ http.Header } -// List: Enumerate Operations for the given ManagedZone. +// List: Enumerates Operations for the given ManagedZone. +// +// - managedZone: Identifies the managed zone addressed by this request. +// - project: Identifies the project addressed by this request. func (r *ManagedZoneOperationsService) List(project string, managedZone string) *ManagedZoneOperationsListCall { c := &ManagedZoneOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -2988,7 +3050,7 @@ func (r *ManagedZoneOperationsService) List(project string, managedZone string) } // MaxResults sets the optional parameter "maxResults": Maximum number -// of results to be returned. If unspecified, the server will decide how +// of results to be returned. If unspecified, the server decides how // many results to return. 
func (c *ManagedZoneOperationsListCall) MaxResults(maxResults int64) *ManagedZoneOperationsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) @@ -3051,7 +3113,7 @@ func (c *ManagedZoneOperationsListCall) Header() http.Header { func (c *ManagedZoneOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3115,7 +3177,7 @@ func (c *ManagedZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*Manag } return ret, nil // { - // "description": "Enumerate Operations for the given ManagedZone.", + // "description": "Enumerates Operations for the given ManagedZone.", // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/operations", // "httpMethod": "GET", // "id": "dns.managedZoneOperations.list", @@ -3131,7 +3193,7 @@ func (c *ManagedZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*Manag // "type": "string" // }, // "maxResults": { - // "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.", + // "description": "Optional. Maximum number of results to be returned. If unspecified, the server decides how many results to return.", // "format": "int32", // "location": "query", // "type": "integer" @@ -3208,7 +3270,9 @@ type ManagedZonesCreateCall struct { header_ http.Header } -// Create: Create a new ManagedZone. +// Create: Creates a new ManagedZone. +// +// - project: Identifies the project addressed by this request. func (r *ManagedZonesService) Create(project string, managedzone *ManagedZone) *ManagedZonesCreateCall { c := &ManagedZonesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -3252,7 +3316,7 @@ func (c *ManagedZonesCreateCall) Header() http.Header { func (c *ManagedZonesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3316,7 +3380,7 @@ func (c *ManagedZonesCreateCall) Do(opts ...googleapi.CallOption) (*ManagedZone, } return ret, nil // { - // "description": "Create a new ManagedZone.", + // "description": "Creates a new ManagedZone.", // "flatPath": "dns/v1/projects/{project}/managedZones", // "httpMethod": "POST", // "id": "dns.managedZones.create", @@ -3362,7 +3426,11 @@ type ManagedZonesDeleteCall struct { header_ http.Header } -// Delete: Delete a previously created ManagedZone. +// Delete: Deletes a previously created ManagedZone. +// +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. 
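The ManagedZones.Delete method documented above returns no body; its generated Do yields only an error. A short sketch that fetches a zone and then deletes it, reusing srv from the first example:

// deleteZone confirms a zone exists and then removes it. Per the doc comments
// above, both Get and Delete accept the managed zone name or its ID.
func deleteZone(srv *dns.Service, project, zone string) error {
	if _, err := srv.ManagedZones.Get(project, zone).Do(); err != nil {
		return err
	}
	return srv.ManagedZones.Delete(project, zone).Do()
}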
func (r *ManagedZonesService) Delete(project string, managedZone string) *ManagedZonesDeleteCall { c := &ManagedZonesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -3406,7 +3474,7 @@ func (c *ManagedZonesDeleteCall) Header() http.Header { func (c *ManagedZonesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3441,7 +3509,7 @@ func (c *ManagedZonesDeleteCall) Do(opts ...googleapi.CallOption) error { } return nil // { - // "description": "Delete a previously created ManagedZone.", + // "description": "Deletes a previously created ManagedZone.", // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}", // "httpMethod": "DELETE", // "id": "dns.managedZones.delete", @@ -3489,7 +3557,11 @@ type ManagedZonesGetCall struct { header_ http.Header } -// Get: Fetch the representation of an existing ManagedZone. +// Get: Fetches the representation of an existing ManagedZone. +// +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. func (r *ManagedZonesService) Get(project string, managedZone string) *ManagedZonesGetCall { c := &ManagedZonesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -3543,7 +3615,7 @@ func (c *ManagedZonesGetCall) Header() http.Header { func (c *ManagedZonesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3606,7 +3678,7 @@ func (c *ManagedZonesGetCall) Do(opts ...googleapi.CallOption) (*ManagedZone, er } return ret, nil // { - // "description": "Fetch the representation of an existing ManagedZone.", + // "description": "Fetches the representation of an existing ManagedZone.", // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}", // "httpMethod": "GET", // "id": "dns.managedZones.get", @@ -3658,8 +3730,10 @@ type ManagedZonesListCall struct { header_ http.Header } -// List: Enumerate ManagedZones that have been created but not yet +// List: Enumerates ManagedZones that have been created but not yet // deleted. +// +// - project: Identifies the project addressed by this request. func (r *ManagedZonesService) List(project string) *ManagedZonesListCall { c := &ManagedZonesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -3674,7 +3748,7 @@ func (c *ManagedZonesListCall) DnsName(dnsName string) *ManagedZonesListCall { } // MaxResults sets the optional parameter "maxResults": Maximum number -// of results to be returned. If unspecified, the server will decide how +// of results to be returned. If unspecified, the server decides how // many results to return. 
func (c *ManagedZonesListCall) MaxResults(maxResults int64) *ManagedZonesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) @@ -3726,7 +3800,7 @@ func (c *ManagedZonesListCall) Header() http.Header { func (c *ManagedZonesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3788,7 +3862,7 @@ func (c *ManagedZonesListCall) Do(opts ...googleapi.CallOption) (*ManagedZonesLi } return ret, nil // { - // "description": "Enumerate ManagedZones that have been created but not yet deleted.", + // "description": "Enumerates ManagedZones that have been created but not yet deleted.", // "flatPath": "dns/v1/projects/{project}/managedZones", // "httpMethod": "GET", // "id": "dns.managedZones.list", @@ -3802,7 +3876,7 @@ func (c *ManagedZonesListCall) Do(opts ...googleapi.CallOption) (*ManagedZonesLi // "type": "string" // }, // "maxResults": { - // "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.", + // "description": "Optional. Maximum number of results to be returned. If unspecified, the server decides how many results to return.", // "format": "int32", // "location": "query", // "type": "integer" @@ -3866,7 +3940,11 @@ type ManagedZonesPatchCall struct { header_ http.Header } -// Patch: Apply a partial update to an existing ManagedZone. +// Patch: Applies a partial update to an existing ManagedZone. +// +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. func (r *ManagedZonesService) Patch(project string, managedZone string, managedzone *ManagedZone) *ManagedZonesPatchCall { c := &ManagedZonesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -3911,7 +3989,7 @@ func (c *ManagedZonesPatchCall) Header() http.Header { func (c *ManagedZonesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3976,7 +4054,7 @@ func (c *ManagedZonesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Apply a partial update to an existing ManagedZone.", + // "description": "Applies a partial update to an existing ManagedZone.", // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}", // "httpMethod": "PATCH", // "id": "dns.managedZones.patch", @@ -4030,7 +4108,11 @@ type ManagedZonesUpdateCall struct { header_ http.Header } -// Update: Update an existing ManagedZone. +// Update: Updates an existing ManagedZone. +// +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. 
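Patch versus Update, as documented above: Patch applies a partial update, while Update replaces the whole resource. A hedged sketch of a partial update follows, reusing srv from the first example; it assumes the returned Operation carries a Status field as described earlier in this file.

// setZoneDescription changes only the zone description. Patch sends just the
// fields set here; Update would replace the entire ManagedZone resource.
func setZoneDescription(srv *dns.Service, project, zone, desc string) error {
	op, err := srv.ManagedZones.Patch(project, zone, &dns.ManagedZone{Description: desc}).Do()
	if err != nil {
		return err
	}
	fmt.Println("operation status:", op.Status)
	return nil
}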
func (r *ManagedZonesService) Update(project string, managedZone string, managedzone *ManagedZone) *ManagedZonesUpdateCall { c := &ManagedZonesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -4075,7 +4157,7 @@ func (c *ManagedZonesUpdateCall) Header() http.Header { func (c *ManagedZonesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4140,7 +4222,7 @@ func (c *ManagedZonesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Update an existing ManagedZone.", + // "description": "Updates an existing ManagedZone.", // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}", // "httpMethod": "PUT", // "id": "dns.managedZones.update", @@ -4193,7 +4275,9 @@ type PoliciesCreateCall struct { header_ http.Header } -// Create: Create a new Policy +// Create: Creates a new Policy. +// +// - project: Identifies the project addressed by this request. func (r *PoliciesService) Create(project string, policy *Policy) *PoliciesCreateCall { c := &PoliciesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -4237,7 +4321,7 @@ func (c *PoliciesCreateCall) Header() http.Header { func (c *PoliciesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4301,7 +4385,7 @@ func (c *PoliciesCreateCall) Do(opts ...googleapi.CallOption) (*Policy, error) { } return ret, nil // { - // "description": "Create a new Policy", + // "description": "Creates a new Policy.", // "flatPath": "dns/v1/projects/{project}/policies", // "httpMethod": "POST", // "id": "dns.policies.create", @@ -4347,8 +4431,12 @@ type PoliciesDeleteCall struct { header_ http.Header } -// Delete: Delete a previously created Policy. Will fail if the policy -// is still being referenced by a network. +// Delete: Deletes a previously created Policy. Fails if the policy is +// still being referenced by a network. +// +// - policy: User given friendly name of the policy addressed by this +// request. +// - project: Identifies the project addressed by this request. func (r *PoliciesService) Delete(project string, policy string) *PoliciesDeleteCall { c := &PoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -4392,7 +4480,7 @@ func (c *PoliciesDeleteCall) Header() http.Header { func (c *PoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4427,7 +4515,7 @@ func (c *PoliciesDeleteCall) Do(opts ...googleapi.CallOption) error { } return nil // { - // "description": "Delete a previously created Policy. Will fail if the policy is still being referenced by a network.", + // "description": "Deletes a previously created Policy. 
Fails if the policy is still being referenced by a network.", // "flatPath": "dns/v1/projects/{project}/policies/{policy}", // "httpMethod": "DELETE", // "id": "dns.policies.delete", @@ -4475,7 +4563,11 @@ type PoliciesGetCall struct { header_ http.Header } -// Get: Fetch the representation of an existing Policy. +// Get: Fetches the representation of an existing Policy. +// +// - policy: User given friendly name of the policy addressed by this +// request. +// - project: Identifies the project addressed by this request. func (r *PoliciesService) Get(project string, policy string) *PoliciesGetCall { c := &PoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -4529,7 +4621,7 @@ func (c *PoliciesGetCall) Header() http.Header { func (c *PoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4592,7 +4684,7 @@ func (c *PoliciesGetCall) Do(opts ...googleapi.CallOption) (*Policy, error) { } return ret, nil // { - // "description": "Fetch the representation of an existing Policy.", + // "description": "Fetches the representation of an existing Policy.", // "flatPath": "dns/v1/projects/{project}/policies/{policy}", // "httpMethod": "GET", // "id": "dns.policies.get", @@ -4644,7 +4736,9 @@ type PoliciesListCall struct { header_ http.Header } -// List: Enumerate all Policies associated with a project. +// List: Enumerates all Policies associated with a project. +// +// - project: Identifies the project addressed by this request. func (r *PoliciesService) List(project string) *PoliciesListCall { c := &PoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -4652,7 +4746,7 @@ func (r *PoliciesService) List(project string) *PoliciesListCall { } // MaxResults sets the optional parameter "maxResults": Maximum number -// of results to be returned. If unspecified, the server will decide how +// of results to be returned. If unspecified, the server decides how // many results to return. func (c *PoliciesListCall) MaxResults(maxResults int64) *PoliciesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) @@ -4704,7 +4798,7 @@ func (c *PoliciesListCall) Header() http.Header { func (c *PoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4766,7 +4860,7 @@ func (c *PoliciesListCall) Do(opts ...googleapi.CallOption) (*PoliciesListRespon } return ret, nil // { - // "description": "Enumerate all Policies associated with a project.", + // "description": "Enumerates all Policies associated with a project.", // "flatPath": "dns/v1/projects/{project}/policies", // "httpMethod": "GET", // "id": "dns.policies.list", @@ -4775,7 +4869,7 @@ func (c *PoliciesListCall) Do(opts ...googleapi.CallOption) (*PoliciesListRespon // ], // "parameters": { // "maxResults": { - // "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.", + // "description": "Optional. Maximum number of results to be returned. 
If unspecified, the server decides how many results to return.", // "format": "int32", // "location": "query", // "type": "integer" @@ -4839,7 +4933,11 @@ type PoliciesPatchCall struct { header_ http.Header } -// Patch: Apply a partial update to an existing Policy. +// Patch: Applies a partial update to an existing Policy. +// +// - policy: User given friendly name of the policy addressed by this +// request. +// - project: Identifies the project addressed by this request. func (r *PoliciesService) Patch(project string, policy string, policy2 *Policy) *PoliciesPatchCall { c := &PoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -4884,7 +4982,7 @@ func (c *PoliciesPatchCall) Header() http.Header { func (c *PoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4949,7 +5047,7 @@ func (c *PoliciesPatchCall) Do(opts ...googleapi.CallOption) (*PoliciesPatchResp } return ret, nil // { - // "description": "Apply a partial update to an existing Policy.", + // "description": "Applies a partial update to an existing Policy.", // "flatPath": "dns/v1/projects/{project}/policies/{policy}", // "httpMethod": "PATCH", // "id": "dns.policies.patch", @@ -5003,7 +5101,11 @@ type PoliciesUpdateCall struct { header_ http.Header } -// Update: Update an existing Policy. +// Update: Updates an existing Policy. +// +// - policy: User given friendly name of the policy addressed by this +// request. +// - project: Identifies the project addressed by this request. func (r *PoliciesService) Update(project string, policy string, policy2 *Policy) *PoliciesUpdateCall { c := &PoliciesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -5048,7 +5150,7 @@ func (c *PoliciesUpdateCall) Header() http.Header { func (c *PoliciesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5113,7 +5215,7 @@ func (c *PoliciesUpdateCall) Do(opts ...googleapi.CallOption) (*PoliciesUpdateRe } return ret, nil // { - // "description": "Update an existing Policy.", + // "description": "Updates an existing Policy.", // "flatPath": "dns/v1/projects/{project}/policies/{policy}", // "httpMethod": "PUT", // "id": "dns.policies.update", @@ -5166,7 +5268,9 @@ type ProjectsGetCall struct { header_ http.Header } -// Get: Fetch the representation of an existing Project. +// Get: Fetches the representation of an existing Project. +// +// - project: Identifies the project addressed by this request. 
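The Policies methods above manage DNS policies attached to VPC networks. Below is a hedged sketch of Policies.Create using the Policy and PolicyNetwork types from this file; the policy name and the choice to set only Name, EnableInboundForwarding, and Networks are illustrative, not taken from the diff.

// createDNSPolicy creates a policy that enables inbound forwarding for one
// VPC network; per the field description above, enabling inbound forwarding
// allocates a virtual IP address in each bound subnetwork.
func createDNSPolicy(srv *dns.Service, project, networkURL string) (*dns.Policy, error) {
	policy := &dns.Policy{
		Name:                    "inbound-forwarding", // user-assigned name (placeholder)
		EnableInboundForwarding: true,
		Networks: []*dns.PolicyNetwork{{
			NetworkUrl: networkURL,
		}},
	}
	return srv.Policies.Create(project, policy).Do()
}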
func (r *ProjectsService) Get(project string) *ProjectsGetCall { c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -5219,7 +5323,7 @@ func (c *ProjectsGetCall) Header() http.Header { func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5281,7 +5385,7 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { } return ret, nil // { - // "description": "Fetch the representation of an existing Project.", + // "description": "Fetches the representation of an existing Project.", // "flatPath": "dns/v1/projects/{project}", // "httpMethod": "GET", // "id": "dns.projects.get", @@ -5315,20 +5419,758 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { } -// method id "dns.resourceRecordSets.list": +// method id "dns.projects.managedZones.rrsets.create": -type ResourceRecordSetsListCall struct { - s *Service - project string - managedZone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ProjectsManagedZonesRrsetsCreateCall struct { + s *Service + project string + managedZone string + resourcerecordset *ResourceRecordSet + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Enumerate ResourceRecordSets that have been created but not yet -// deleted. +// Create: Creates a new ResourceRecordSet. +// +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. +func (r *ProjectsManagedZonesRrsetsService) Create(project string, managedZone string, resourcerecordset *ResourceRecordSet) *ProjectsManagedZonesRrsetsCreateCall { + c := &ProjectsManagedZonesRrsetsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + c.resourcerecordset = resourcerecordset + return c +} + +// ClientOperationId sets the optional parameter "clientOperationId": +// For mutating operation requests only. An optional identifier +// specified by the client. Must be unique for operation resources in +// the Operations collection. +func (c *ProjectsManagedZonesRrsetsCreateCall) ClientOperationId(clientOperationId string) *ProjectsManagedZonesRrsetsCreateCall { + c.urlParams_.Set("clientOperationId", clientOperationId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsManagedZonesRrsetsCreateCall) Fields(s ...googleapi.Field) *ProjectsManagedZonesRrsetsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsManagedZonesRrsetsCreateCall) Context(ctx context.Context) *ProjectsManagedZonesRrsetsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
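This revision introduces the dns.projects.managedZones.rrsets.* methods, which operate on a single ResourceRecordSet without wrapping it in a Change. A hedged sketch of create followed by delete, reusing srv from the first example; it assumes the Delete call's Do returns (*ResourceRecordSetsDeleteResponse, error), which the new response type suggests but the truncated hunk does not show. Record name and data are placeholders.

// createThenDeleteTxtRecord writes one TXT record via the new rrsets.create
// method and then removes it via rrsets.delete, addressed by name and type.
func createThenDeleteTxtRecord(srv *dns.Service, project, zone string) error {
	rrset := &dns.ResourceRecordSet{
		Name:    "_acme-challenge.corp.example.com.", // placeholder FQDN
		Type:    "TXT",
		Ttl:     60,
		Rrdatas: []string{`"token-value"`},
	}
	if _, err := srv.Projects.ManagedZones.Rrsets.Create(project, zone, rrset).Do(); err != nil {
		return err
	}
	_, err := srv.Projects.ManagedZones.Rrsets.Delete(project, zone, rrset.Name, rrset.Type).Do()
	return err
}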
+func (c *ProjectsManagedZonesRrsetsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsManagedZonesRrsetsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcerecordset) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dns.projects.managedZones.rrsets.create" call. +// Exactly one of *ResourceRecordSet or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ResourceRecordSet.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsManagedZonesRrsetsCreateCall) Do(opts ...googleapi.CallOption) (*ResourceRecordSet, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ResourceRecordSet{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new ResourceRecordSet.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets", + // "httpMethod": "POST", + // "id": "dns.projects.managedZones.rrsets.create", + // "parameterOrder": [ + // "project", + // "managedZone" + // ], + // "parameters": { + // "clientOperationId": { + // "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + // "location": "query", + // "type": "string" + // }, + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets", + // "request": { + // "$ref": "ResourceRecordSet" + // }, + // "response": { + // "$ref": "ResourceRecordSet" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.projects.managedZones.rrsets.delete": + +type ProjectsManagedZonesRrsetsDeleteCall struct { + s *Service + project string + managedZone string + name string + type_ string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a previously created ResourceRecordSet. +// +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - name: Fully qualified domain name. +// - project: Identifies the project addressed by this request. +// - type: RRSet type. +func (r *ProjectsManagedZonesRrsetsService) Delete(project string, managedZone string, name string, type_ string) *ProjectsManagedZonesRrsetsDeleteCall { + c := &ProjectsManagedZonesRrsetsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + c.name = name + c.type_ = type_ + return c +} + +// ClientOperationId sets the optional parameter "clientOperationId": +// For mutating operation requests only. An optional identifier +// specified by the client. Must be unique for operation resources in +// the Operations collection. +func (c *ProjectsManagedZonesRrsetsDeleteCall) ClientOperationId(clientOperationId string) *ProjectsManagedZonesRrsetsDeleteCall { + c.urlParams_.Set("clientOperationId", clientOperationId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsManagedZonesRrsetsDeleteCall) Fields(s ...googleapi.Field) *ProjectsManagedZonesRrsetsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsManagedZonesRrsetsDeleteCall) Context(ctx context.Context) *ProjectsManagedZonesRrsetsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsManagedZonesRrsetsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsManagedZonesRrsetsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + "name": c.name, + "type": c.type_, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dns.projects.managedZones.rrsets.delete" call. +// Exactly one of *ResourceRecordSetsDeleteResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ResourceRecordSetsDeleteResponse.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsManagedZonesRrsetsDeleteCall) Do(opts ...googleapi.CallOption) (*ResourceRecordSetsDeleteResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ResourceRecordSetsDeleteResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a previously created ResourceRecordSet.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}", + // "httpMethod": "DELETE", + // "id": "dns.projects.managedZones.rrsets.delete", + // "parameterOrder": [ + // "project", + // "managedZone", + // "name", + // "type" + // ], + // "parameters": { + // "clientOperationId": { + // "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + // "location": "query", + // "type": "string" + // }, + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "name": { + // "description": "Fully qualified domain name.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "type": { + // "description": "RRSet type.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}", + // "response": { + // "$ref": "ResourceRecordSetsDeleteResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.projects.managedZones.rrsets.get": + +type ProjectsManagedZonesRrsetsGetCall struct { + s *Service + project string + managedZone string + name string + type_ string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Fetches the representation of an existing ResourceRecordSet. +// +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - name: Fully qualified domain name. +// - project: Identifies the project addressed by this request. +// - type: RRSet type. +func (r *ProjectsManagedZonesRrsetsService) Get(project string, managedZone string, name string, type_ string) *ProjectsManagedZonesRrsetsGetCall { + c := &ProjectsManagedZonesRrsetsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + c.name = name + c.type_ = type_ + return c +} + +// ClientOperationId sets the optional parameter "clientOperationId": +// For mutating operation requests only. An optional identifier +// specified by the client. Must be unique for operation resources in +// the Operations collection. +func (c *ProjectsManagedZonesRrsetsGetCall) ClientOperationId(clientOperationId string) *ProjectsManagedZonesRrsetsGetCall { + c.urlParams_.Set("clientOperationId", clientOperationId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsManagedZonesRrsetsGetCall) Fields(s ...googleapi.Field) *ProjectsManagedZonesRrsetsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsManagedZonesRrsetsGetCall) IfNoneMatch(entityTag string) *ProjectsManagedZonesRrsetsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsManagedZonesRrsetsGetCall) Context(ctx context.Context) *ProjectsManagedZonesRrsetsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsManagedZonesRrsetsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsManagedZonesRrsetsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + "name": c.name, + "type": c.type_, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dns.projects.managedZones.rrsets.get" call. +// Exactly one of *ResourceRecordSet or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ResourceRecordSet.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsManagedZonesRrsetsGetCall) Do(opts ...googleapi.CallOption) (*ResourceRecordSet, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ResourceRecordSet{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Fetches the representation of an existing ResourceRecordSet.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}", + // "httpMethod": "GET", + // "id": "dns.projects.managedZones.rrsets.get", + // "parameterOrder": [ + // "project", + // "managedZone", + // "name", + // "type" + // ], + // "parameters": { + // "clientOperationId": { + // "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + // "location": "query", + // "type": "string" + // }, + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "name": { + // "description": "Fully qualified domain name.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "type": { + // "description": "RRSet type.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}", + // "response": { + // "$ref": "ResourceRecordSet" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/ndev.clouddns.readonly", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.projects.managedZones.rrsets.patch": + +type ProjectsManagedZonesRrsetsPatchCall struct { + s *Service + project string + managedZone string + name string + type_ string + resourcerecordset *ResourceRecordSet + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Applies a partial update to an existing ResourceRecordSet. +// +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - name: Fully qualified domain name. +// - project: Identifies the project addressed by this request. +// - type: RRSet type. +func (r *ProjectsManagedZonesRrsetsService) Patch(project string, managedZone string, name string, type_ string, resourcerecordset *ResourceRecordSet) *ProjectsManagedZonesRrsetsPatchCall { + c := &ProjectsManagedZonesRrsetsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + c.name = name + c.type_ = type_ + c.resourcerecordset = resourcerecordset + return c +} + +// ClientOperationId sets the optional parameter "clientOperationId": +// For mutating operation requests only. An optional identifier +// specified by the client. Must be unique for operation resources in +// the Operations collection. +func (c *ProjectsManagedZonesRrsetsPatchCall) ClientOperationId(clientOperationId string) *ProjectsManagedZonesRrsetsPatchCall { + c.urlParams_.Set("clientOperationId", clientOperationId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsManagedZonesRrsetsPatchCall) Fields(s ...googleapi.Field) *ProjectsManagedZonesRrsetsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsManagedZonesRrsetsPatchCall) Context(ctx context.Context) *ProjectsManagedZonesRrsetsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsManagedZonesRrsetsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsManagedZonesRrsetsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcerecordset) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + "name": c.name, + "type": c.type_, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dns.projects.managedZones.rrsets.patch" call. +// Exactly one of *ResourceRecordSet or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ResourceRecordSet.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsManagedZonesRrsetsPatchCall) Do(opts ...googleapi.CallOption) (*ResourceRecordSet, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ResourceRecordSet{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Applies a partial update to an existing ResourceRecordSet.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}", + // "httpMethod": "PATCH", + // "id": "dns.projects.managedZones.rrsets.patch", + // "parameterOrder": [ + // "project", + // "managedZone", + // "name", + // "type" + // ], + // "parameters": { + // "clientOperationId": { + // "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + // "location": "query", + // "type": "string" + // }, + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "name": { + // "description": "Fully qualified domain name.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "type": { + // "description": "RRSet type.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}", + // "request": { + // "$ref": "ResourceRecordSet" + // }, + // "response": { + // "$ref": "ResourceRecordSet" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.resourceRecordSets.list": + +type ResourceRecordSetsListCall struct { + s *Service + project string + managedZone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Enumerates ResourceRecordSets that you have created but not yet +// deleted. +// +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. func (r *ResourceRecordSetsService) List(project string, managedZone string) *ResourceRecordSetsListCall { c := &ResourceRecordSetsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -5337,7 +6179,7 @@ func (r *ResourceRecordSetsService) List(project string, managedZone string) *Re } // MaxResults sets the optional parameter "maxResults": Maximum number -// of results to be returned. If unspecified, the server will decide how +// of results to be returned. If unspecified, the server decides how // many results to return. func (c *ResourceRecordSetsListCall) MaxResults(maxResults int64) *ResourceRecordSetsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) @@ -5404,7 +6246,7 @@ func (c *ResourceRecordSetsListCall) Header() http.Header { func (c *ResourceRecordSetsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5467,7 +6309,7 @@ func (c *ResourceRecordSetsListCall) Do(opts ...googleapi.CallOption) (*Resource } return ret, nil // { - // "description": "Enumerate ResourceRecordSets that have been created but not yet deleted.", + // "description": "Enumerates ResourceRecordSets that you have created but not yet deleted.", // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets", // "httpMethod": "GET", // "id": "dns.resourceRecordSets.list", @@ -5483,7 +6325,7 @@ func (c *ResourceRecordSetsListCall) Do(opts ...googleapi.CallOption) (*Resource // "type": "string" // }, // "maxResults": { - // "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.", + // "description": "Optional. Maximum number of results to be returned. 
If unspecified, the server decides how many results to return.", // "format": "int32", // "location": "query", // "type": "integer" diff --git a/vendor/google.golang.org/api/serviceusage/v1/serviceusage-api.json b/vendor/google.golang.org/api/serviceusage/v1/serviceusage-api.json index cbf9e7f3dd7..44c1427a052 100644 --- a/vendor/google.golang.org/api/serviceusage/v1/serviceusage-api.json +++ b/vendor/google.golang.org/api/serviceusage/v1/serviceusage-api.json @@ -3,7 +3,7 @@ "oauth2": { "scopes": { "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" + "description": "See, edit, configure, and delete your Google Cloud Platform data" }, "https://www.googleapis.com/auth/cloud-platform.read-only": { "description": "View your data across Google Cloud Platform services" @@ -426,7 +426,7 @@ } } }, - "revision": "20210124", + "revision": "20210326", "rootUrl": "https://serviceusage.googleapis.com/", "schemas": { "AdminQuotaPolicy": { @@ -1261,7 +1261,7 @@ "type": "object" }, "GoogleApiService": { - "description": "`Service` is the root object of Google service configuration schema. It describes basic information about a service, such as the name and the title, and delegates other aspects to sub-sections. Each sub-section is either a proto message or a repeated proto message that configures a specific aspect, such as auth. See each proto message definition for details. Example: type: google.api.Service config_version: 3 name: calendar.googleapis.com title: Google Calendar API apis: - name: google.calendar.v3.Calendar authentication: providers: - id: google_calendar_auth jwks_uri: https://www.googleapis.com/oauth2/v1/certs issuer: https://securetoken.google.com rules: - selector: \"*\" requirements: provider_id: google_calendar_auth", + "description": "`Service` is the root object of Google service configuration schema. It describes basic information about a service, such as the name and the title, and delegates other aspects to sub-sections. Each sub-section is either a proto message or a repeated proto message that configures a specific aspect, such as auth. See each proto message definition for details. Example: type: google.api.Service name: calendar.googleapis.com title: Google Calendar API apis: - name: google.calendar.v3.Calendar authentication: providers: - id: google_calendar_auth jwks_uri: https://www.googleapis.com/oauth2/v1/certs issuer: https://securetoken.google.com rules: - selector: \"*\" requirements: provider_id: google_calendar_auth", "id": "GoogleApiService", "properties": { "apis": { @@ -1284,7 +1284,7 @@ "description": "Billing configuration." }, "configVersion": { - "description": "Deprecated. The service config compiler always sets this field to `3`.", + "description": "Obsolete. Do not use. This field has no semantic meaning. The service config compiler always sets this field to `3`.", "format": "uint32", "type": "integer" }, diff --git a/vendor/google.golang.org/api/serviceusage/v1/serviceusage-gen.go b/vendor/google.golang.org/api/serviceusage/v1/serviceusage-gen.go index dc8b7726df0..f4cfd44e07b 100644 --- a/vendor/google.golang.org/api/serviceusage/v1/serviceusage-gen.go +++ b/vendor/google.golang.org/api/serviceusage/v1/serviceusage-gen.go @@ -83,7 +83,7 @@ const mtlsBasePath = "https://serviceusage.mtls.googleapis.com/" // OAuth2 scopes used by this API. 
const ( - // View and manage your data across Google Cloud Platform services + // See, edit, configure, and delete your Google Cloud Platform data CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" // View your data across Google Cloud Platform services @@ -1700,8 +1700,8 @@ func (s *GetServiceIdentityResponse) MarshalJSON() ([]byte, error) { // sub-sections. Each sub-section is either a proto message or a // repeated proto message that configures a specific aspect, such as // auth. See each proto message definition for details. Example: type: -// google.api.Service config_version: 3 name: calendar.googleapis.com -// title: Google Calendar API apis: - name: google.calendar.v3.Calendar +// google.api.Service name: calendar.googleapis.com title: Google +// Calendar API apis: - name: google.calendar.v3.Calendar // authentication: providers: - id: google_calendar_auth jwks_uri: // https://www.googleapis.com/oauth2/v1/certs issuer: // https://securetoken.google.com rules: - selector: "*" requirements: @@ -1724,8 +1724,8 @@ type GoogleApiService struct { // Billing: Billing configuration. Billing *Billing `json:"billing,omitempty"` - // ConfigVersion: Deprecated. The service config compiler always sets - // this field to `3`. + // ConfigVersion: Obsolete. Do not use. This field has no semantic + // meaning. The service config compiler always sets this field to `3`. ConfigVersion int64 `json:"configVersion,omitempty"` // Context: Context configuration. @@ -4206,6 +4206,8 @@ type OperationsCancelCall struct { // deleted; instead, it becomes an operation with an Operation.error // value with a google.rpc.Status.code of 1, corresponding to // `Code.CANCELLED`. +// +// - name: The name of the operation resource to be cancelled. func (r *OperationsService) Cancel(name string, canceloperationrequest *CancelOperationRequest) *OperationsCancelCall { c := &OperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4240,7 +4242,7 @@ func (c *OperationsCancelCall) Header() http.Header { func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4349,6 +4351,8 @@ type OperationsDeleteCall struct { // the client is no longer interested in the operation result. It does // not cancel the operation. If the server doesn't support this method, // it returns `google.rpc.Code.UNIMPLEMENTED`. +// +// - name: The name of the operation resource to be deleted. func (r *OperationsService) Delete(name string) *OperationsDeleteCall { c := &OperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4382,7 +4386,7 @@ func (c *OperationsDeleteCall) Header() http.Header { func (c *OperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4483,6 +4487,8 @@ type OperationsGetCall struct { // Get: Gets the latest state of a long-running operation. Clients can // use this method to poll the operation result at intervals as // recommended by the API service. +// +// - name: The name of the operation resource. 
func (r *OperationsService) Get(name string) *OperationsGetCall { c := &OperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4526,7 +4532,7 @@ func (c *OperationsGetCall) Header() http.Header { func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4706,7 +4712,7 @@ func (c *OperationsListCall) Header() http.Header { func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4841,6 +4847,10 @@ type ServicesBatchEnableCall struct { // atomic: if enabling any service fails, then the entire batch fails, // and no state changes occur. To enable a single service, use the // `EnableService` method instead. +// +// - parent: Parent to enable services on. An example name would be: +// `projects/123` where `123` is the project number. The +// `BatchEnableServices` method currently only supports projects. func (r *ServicesService) BatchEnable(parent string, batchenableservicesrequest *BatchEnableServicesRequest) *ServicesBatchEnableCall { c := &ServicesBatchEnableCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -4875,7 +4885,7 @@ func (c *ServicesBatchEnableCall) Header() http.Header { func (c *ServicesBatchEnableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4983,6 +4993,12 @@ type ServicesBatchGetCall struct { // BatchGet: Returns the service configurations and enabled states for a // given list of services. +// +// - parent: Parent to retrieve services from. If this is set, the +// parent of all of the services specified in `names` must match this +// field. An example name would be: `projects/123` where `123` is the +// project number. The `BatchGetServices` method currently only +// supports projects. func (r *ServicesService) BatchGet(parent string) *ServicesBatchGetCall { c := &ServicesBatchGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -5036,7 +5052,7 @@ func (c *ServicesBatchGetCall) Header() http.Header { func (c *ServicesBatchGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5149,6 +5165,12 @@ type ServicesDisableCall struct { // disable method on a service that is not currently enabled. Callers // will receive a `FAILED_PRECONDITION` status if the target service is // not currently enabled. +// +// - name: Name of the consumer and service to disable the service on. +// The enable and disable methods currently only support projects. 
An +// example name would be: +// `projects/123/services/serviceusage.googleapis.com` where `123` is +// the project number. func (r *ServicesService) Disable(name string, disableservicerequest *DisableServiceRequest) *ServicesDisableCall { c := &ServicesDisableCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5183,7 +5205,7 @@ func (c *ServicesDisableCall) Header() http.Header { func (c *ServicesDisableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5290,6 +5312,13 @@ type ServicesEnableCall struct { } // Enable: Enable a service so that it can be used with a project. +// +// - name: Name of the consumer and service to enable the service on. +// The `EnableService` and `DisableService` methods currently only +// support projects. Enabling a service requires that the service is +// public or is shared with the user enabling the service. An example +// name would be: `projects/123/services/serviceusage.googleapis.com` +// where `123` is the project number. func (r *ServicesService) Enable(name string, enableservicerequest *EnableServiceRequest) *ServicesEnableCall { c := &ServicesEnableCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5324,7 +5353,7 @@ func (c *ServicesEnableCall) Header() http.Header { func (c *ServicesEnableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5432,6 +5461,11 @@ type ServicesGetCall struct { // Get: Returns the service configuration and enabled state for a given // service. +// +// - name: Name of the consumer and service to get the `ConsumerState` +// for. An example name would be: +// `projects/123/services/serviceusage.googleapis.com` where `123` is +// the project number. func (r *ServicesService) Get(name string) *ServicesGetCall { c := &ServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5475,7 +5509,7 @@ func (c *ServicesGetCall) Header() http.Header { func (c *ServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5587,6 +5621,9 @@ type ServicesListCall struct { // should use Cloud Asset Inventory API // (https://cloud.google.com/asset-inventory/docs/apis), which provides // higher throughput and richer filtering capability. +// +// - parent: Parent to search for services on. An example name would be: +// `projects/123` where `123` is the project number. 
func (r *ServicesService) List(parent string) *ServicesListCall { c := &ServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -5654,7 +5691,7 @@ func (c *ServicesListCall) Header() http.Header { func (c *ServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210406") for k, v := range c.header_ { reqHeaders[k] = v } diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 788759bde4b..ab531f4c0b8 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -101,6 +101,9 @@ type SubConn interface { // a new connection will be created. // // This will trigger a state transition for the SubConn. + // + // Deprecated: This method is now part of the ClientConn interface and will + // eventually be removed from here. UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() @@ -143,6 +146,13 @@ type ClientConn interface { // RemoveSubConn removes the SubConn from ClientConn. // The SubConn will be shutdown. RemoveSubConn(SubConn) + // UpdateAddresses updates the addresses used in the passed in SubConn. + // gRPC checks if the currently connected address is still in the new list. + // If so, the connection will be kept. Else, the connection will be + // gracefully closed, and a new connection will be created. + // + // This will trigger a state transition for the SubConn. + UpdateAddresses(SubConn, []resolver.Address) // UpdateState notifies gRPC that the balancer's internal state has // changed. 
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index e0d34288ccf..c883efa0bbf 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -22,6 +22,7 @@ import ( "errors" "fmt" + "google.golang.org/grpc/attributes" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" @@ -41,7 +42,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) cc: cc, pickerBuilder: bb.pickerBuilder, - subConns: make(map[resolver.Address]balancer.SubConn), + subConns: make(map[resolver.Address]subConnInfo), scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, config: bb.config, @@ -57,6 +58,11 @@ func (bb *baseBuilder) Name() string { return bb.name } +type subConnInfo struct { + subConn balancer.SubConn + attrs *attributes.Attributes +} + type baseBalancer struct { cc balancer.ClientConn pickerBuilder PickerBuilder @@ -64,7 +70,7 @@ type baseBalancer struct { csEvltr *balancer.ConnectivityStateEvaluator state connectivity.State - subConns map[resolver.Address]balancer.SubConn // `attributes` is stripped from the keys of this map (the addresses) + subConns map[resolver.Address]subConnInfo // `attributes` is stripped from the keys of this map (the addresses) scStates map[balancer.SubConn]connectivity.State picker balancer.Picker config Config @@ -114,7 +120,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { aNoAttrs := a aNoAttrs.Attributes = nil addrsSet[aNoAttrs] = struct{}{} - if sc, ok := b.subConns[aNoAttrs]; !ok { + if scInfo, ok := b.subConns[aNoAttrs]; !ok { // a is a new address (not existing in b.subConns). // // When creating SubConn, the original address with attributes is @@ -125,7 +131,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue } - b.subConns[aNoAttrs] = sc + b.subConns[aNoAttrs] = subConnInfo{subConn: sc, attrs: a.Attributes} b.scStates[sc] = connectivity.Idle sc.Connect() } else { @@ -135,13 +141,15 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // The SubConn does a reflect.DeepEqual of the new and old // addresses. So this is a noop if the current address is the same // as the old one (including attributes). - sc.UpdateAddresses([]resolver.Address{a}) + scInfo.attrs = a.Attributes + b.subConns[aNoAttrs] = scInfo + b.cc.UpdateAddresses(scInfo.subConn, []resolver.Address{a}) } } - for a, sc := range b.subConns { + for a, scInfo := range b.subConns { // a was removed by resolver. if _, ok := addrsSet[a]; !ok { - b.cc.RemoveSubConn(sc) + b.cc.RemoveSubConn(scInfo.subConn) delete(b.subConns, a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. // The entry will be deleted in UpdateSubConnState. @@ -184,9 +192,10 @@ func (b *baseBalancer) regeneratePicker() { readySCs := make(map[balancer.SubConn]SubConnInfo) // Filter out all ready SCs from full subConn map. 
- for addr, sc := range b.subConns { - if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { - readySCs[sc] = SubConnInfo{Address: addr} + for addr, scInfo := range b.subConns { + if st, ok := b.scStates[scInfo.subConn]; ok && st == connectivity.Ready { + addr.Attributes = scInfo.attrs + readySCs[scInfo.subConn] = SubConnInfo{Address: addr} } } b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index 11e592aabb0..41061d6d3dc 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -163,6 +163,14 @@ func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) } +func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + acbw.UpdateAddresses(addrs) +} + func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { ccb.mu.Lock() defer ccb.mu.Unlock() diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index e1230fdd358..3009b35afe7 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -21,6 +21,8 @@ package proto import ( + "fmt" + "github.com/golang/protobuf/proto" "google.golang.org/grpc/encoding" ) @@ -36,11 +38,19 @@ func init() { type codec struct{} func (codec) Marshal(v interface{}) ([]byte, error) { - return proto.Marshal(v.(proto.Message)) + vv, ok := v.(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) + } + return proto.Marshal(vv) } func (codec) Unmarshal(data []byte, v interface{}) error { - return proto.Unmarshal(data, v.(proto.Message)) + vv, ok := v.(proto.Message) + if !ok { + return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) + } + return proto.Unmarshal(data, vv) } func (codec) Name() string { diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod index cab74e55774..b177cfa66df 100644 --- a/vendor/google.golang.org/grpc/go.mod +++ b/vendor/google.golang.org/grpc/go.mod @@ -4,7 +4,7 @@ go 1.11 require ( github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 - github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad + github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/golang/protobuf v1.4.2 github.com/google/go-cmp v0.5.0 diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum index 77ee70b4435..bb25cd49156 100644 --- a/vendor/google.golang.org/grpc/go.sum +++ b/vendor/google.golang.org/grpc/go.sum @@ -12,8 +12,8 @@ github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad h1:EmNYJhPYy0pOFjCx2PrgtaBXmee0iUX9hLlxE1xHOJE= -github.com/envoyproxy/go-control-plane 
v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d h1:QyzYnTnPE15SQyUeqU6qLbWxMkwyAyu+vGksa0b7j00= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go index e6990040056..5e7f36703d4 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -24,6 +24,7 @@ import ( "sync" "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" ) @@ -51,6 +52,74 @@ type RPCConfig struct { Context context.Context MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC OnCommitted func() // Called when the RPC has been committed (retries no longer possible) + Interceptor ClientInterceptor +} + +// ClientStream is the same as grpc.ClientStream, but defined here for circular +// dependency reasons. +type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. It is also not safe to call CloseSend + // concurrently with SendMsg. + CloseSend() error + // Context returns the context for this stream. + // + // It should not be called until after Header or RecvMsg has returned. Once + // called, subsequent client-side retries are disabled. + Context() context.Context + // SendMsg is generally called by generated code. On error, SendMsg aborts + // the stream. If the error was generated by the client, the status is + // returned directly; otherwise, io.EOF is returned and the status of + // the stream may be discovered using RecvMsg. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the server. An + // untimely stream closure may result in lost messages. To ensure delivery, + // users should ensure the RPC completed successfully using RecvMsg. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. It is also + // not safe to call CloseSend concurrently with SendMsg. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. 
On + // any other error, the stream is aborted and the error contains the RPC + // status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error +} + +// ClientInterceptor is an interceptor for gRPC client streams. +type ClientInterceptor interface { + // NewStream produces a ClientStream for an RPC which may optionally use + // the provided function to produce a stream for delegation. Note: + // RPCInfo.Context should not be used (will be nil). + // + // done is invoked when the RPC is finished using its connection, or could + // not be assigned a connection. RPC operations may still occur on + // ClientStream after done is called, since the interceptor is invoked by + // application-layer operations. done must never be nil when called. + NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error) +} + +// ServerInterceptor is unimplementable; do not use. +type ServerInterceptor interface { + notDefined() } type csKeyType string diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 8902b7f90d9..d5bbe720db5 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -414,6 +414,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { buf: newRecvBuffer(), headerChan: make(chan struct{}), contentSubtype: callHdr.ContentSubtype, + doneFunc: callHdr.DoneFunc, } s.wq = newWriteQuota(defaultWriteQuota, s.done) s.requestRead = func(n int) { @@ -832,6 +833,9 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) // This will unblock write. close(s.done) + if s.doneFunc != nil { + s.doneFunc() + } } // Close kicks off the shutdown process of the transport. 
This should be called diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 0cf1cc320bb..7c6c89d4f9b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -26,6 +26,7 @@ import ( "io" "math" "net" + "net/http" "strconv" "sync" "sync/atomic" @@ -402,6 +403,20 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( return true } t.maxStreamID = streamID + if state.data.httpMethod != http.MethodPost { + t.mu.Unlock() + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", state.data.httpMethod) + } + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeProtocol, + onWrite: func() {}, + }) + s.cancel() + return false + } t.activeStreams[streamID] = s if len(t.activeStreams) == 1 { t.idle = time.Time{} diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 7e41d1183f9..c7dee140cf1 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -111,6 +111,7 @@ type parsedHeaderData struct { timeoutSet bool timeout time.Duration method string + httpMethod string // key-value metadata map from the peer. mdata map[string][]string statsTags []byte @@ -363,6 +364,8 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) { } d.data.statsTrace = v d.addMetadata(f.Name, string(v)) + case ":method": + d.data.httpMethod = f.Value default: if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) { break diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 9c8f79cb4b2..5cf7c5f80fe 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -241,6 +241,7 @@ type Stream struct { ctx context.Context // the associated context of the stream cancel context.CancelFunc // always nil for client side Stream done chan struct{} // closed at the end of stream to unblock writers. On the client side. + doneFunc func() // invoked at the end of stream on client side. ctxDone <-chan struct{} // same as done chan but for server side. 
Cache of ctx.Done() (for performance) method string // the associated RPC method of the stream recvCompress string @@ -611,6 +612,8 @@ type CallHdr struct { ContentSubtype string PreviousAttempts int // value of grpc-previous-rpc-attempts header to set + + DoneFunc func() // called when the stream is finished } // ClientTransport is the common interface for all gRPC client-side transport diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index 56e33f6c76b..b858c2a5e63 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -84,7 +84,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) e b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) b.sc.Connect() } else { - b.sc.UpdateAddresses(cs.ResolverState.Addresses) + b.cc.UpdateAddresses(b.sc, cs.ResolverState.Addresses) b.sc.Connect() } return nil diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh index ed52187df66..fc6725b89f8 100644 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -40,6 +40,9 @@ echo "go install cmd/protoc-gen-go-grpc" echo "git clone https://github.com/grpc/grpc-proto" git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto +echo "git clone https://github.com/protocolbuffers/protobuf" +git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf + # Pull in code.proto as a proto dependency mkdir -p ${WORKDIR}/googleapis/google/rpc echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" @@ -87,6 +90,7 @@ for src in ${SOURCES[@]}; do -I"." \ -I${WORKDIR}/grpc-proto \ -I${WORKDIR}/googleapis \ + -I${WORKDIR}/protobuf/src \ -I${WORKDIR}/istio \ ${src} done @@ -97,6 +101,7 @@ for src in ${LEGACY_SOURCES[@]}; do -I"." \ -I${WORKDIR}/grpc-proto \ -I${WORKDIR}/googleapis \ + -I${WORKDIR}/protobuf/src \ -I${WORKDIR}/istio \ ${src} done diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index ea5bb8d0c2b..c0a1208f2f3 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -888,8 +888,7 @@ type channelzData struct { // buffer files to ensure compatibility with the gRPC version used. The latest // support package version is 7. // -// Older versions are kept for compatibility. They may be removed if -// compatibility cannot be maintained. +// Older versions are kept for compatibility. // // These constants should not be referenced from any other code. const ( diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index 01e182c306c..54d187186b8 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -73,9 +73,11 @@ func FromProto(s *spb.Status) *Status { return status.FromProto(s) } -// FromError returns a Status representing err if it was produced from this -// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a -// Status is returned with codes.Unknown and the original error message. +// FromError returns a Status representing err if it was produced by this +// package or has a method `GRPCStatus() *Status`. +// If err is nil, a Status is returned with codes.OK and no message. 
+// Otherwise, ok is false and a Status is returned with codes.Unknown and +// the original error message. func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index eda1248d60c..77d25742cc3 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -166,7 +166,6 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } }() } - c := defaultCallInfo() // Provide an opportunity for the first RPC to see the first service config // provided by the resolver. if err := cc.waitForResolvedAddrs(ctx); err != nil { @@ -175,18 +174,40 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth var mc serviceconfig.MethodConfig var onCommit func() - rpcConfig, err := cc.safeConfigSelector.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: method}) + var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) + } + + rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} + rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo) if err != nil { - return nil, status.Convert(err).Err() + return nil, toRPCErr(err) } + if rpcConfig != nil { if rpcConfig.Context != nil { ctx = rpcConfig.Context } mc = rpcConfig.MethodConfig onCommit = rpcConfig.OnCommitted + if rpcConfig.Interceptor != nil { + rpcInfo.Context = nil + ns := newStream + newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + cs, err := rpcConfig.Interceptor.NewStream(ctx, rpcInfo, done, ns) + if err != nil { + return nil, toRPCErr(err) + } + return cs, nil + } + } } + return newStream(ctx, func() {}) +} + +func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) { + c := defaultCallInfo() if mc.WaitForReady != nil { c.failFast = !*mc.WaitForReady } @@ -223,6 +244,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth Host: cc.authority, Method: method, ContentSubtype: c.contentSubtype, + DoneFunc: doneFunc, } // Set our outgoing compression according to the UseCompressor CallOption, if diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 685b91c7056..c3b87eb5ac8 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.35.0" +const Version = "1.37.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index b41df6dc860..dcd939bb390 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -28,7 +28,8 @@ cleanup() { } trap cleanup EXIT -PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}" +PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}" +go version if [[ "$1" = "-install" ]]; then # Check for module support @@ -107,7 +108,7 @@ go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go # - gofmt, goimports, golint (with exceptions for generated code), go vet. gofmt -s -d -l . 2>&1 | fail_on_output goimports -l . 2>&1 | not grep -vE "\.pb\.go" -golint ./... 2>&1 | not grep -vE "\.pb\.go:" +golint ./... 
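As a quick illustration of the status.FromError behavior documented in the hunk above, here is a minimal sketch (assuming only the public google.golang.org/grpc/status and codes packages) covering the three cases: a nil error, an error produced by the status package, and an arbitrary error.

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// nil error: ok is true and the (nil) Status reports codes.OK.
	s, ok := status.FromError(nil)
	fmt.Println(ok, s.Code()) // true OK

	// error produced by the status package: ok is true, original code kept.
	s, ok = status.FromError(status.Error(codes.NotFound, "missing"))
	fmt.Println(ok, s.Code(), s.Message()) // true NotFound missing

	// any other error: ok is false and the code is codes.Unknown.
	s, ok = status.FromError(errors.New("boom"))
	fmt.Println(ok, s.Code(), s.Message()) // false Unknown boom
}
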
2>&1 | not grep -vE "/testv3\.pb\.go:" go vet -all ./... misspell -error . @@ -141,8 +142,11 @@ not grep -Fv '.CredsBundle .NewAddress .NewServiceConfig .Type is deprecated: use Attributes +BuildVersion is deprecated balancer.ErrTransientFailure balancer.Picker +extDesc.Filename is deprecated +github.com/golang/protobuf/jsonpb is deprecated grpc.CallCustomCodec grpc.Code grpc.Compressor @@ -164,13 +168,7 @@ grpc.WithServiceConfig grpc.WithTimeout http.CloseNotifier info.SecurityVersion -resolver.Backend -resolver.GRPCLB -extDesc.Filename is deprecated -BuildVersion is deprecated -github.com/golang/protobuf/jsonpb is deprecated proto is deprecated -xxx_messageInfo_ proto.InternalMessageInfo is deprecated proto.EnumName is deprecated proto.ErrInternalBadWireType is deprecated @@ -184,7 +182,12 @@ proto.RegisterExtension is deprecated proto.RegisteredExtension is deprecated proto.RegisteredExtensions is deprecated proto.RegisterMapType is deprecated -proto.Unmarshaler is deprecated' "${SC_OUT}" +proto.Unmarshaler is deprecated +resolver.Backend +resolver.GRPCLB +Target is deprecated: Use the Target field in the BuildOptions instead. +xxx_messageInfo_ +' "${SC_OUT}" # - special golint on package comments. lint_package_comment_per_package() { diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index cab95a42735..8fb1d9e086a 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -6,7 +6,6 @@ package prototext import ( "fmt" - "strings" "unicode/utf8" "google.golang.org/protobuf/internal/encoding/messageset" @@ -23,6 +22,7 @@ import ( ) // Unmarshal reads the given []byte into the given proto.Message. +// The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m proto.Message) error { return UnmarshalOptions{}.Unmarshal(b, m) } @@ -51,8 +51,9 @@ type UnmarshalOptions struct { } } -// Unmarshal reads the given []byte and populates the given proto.Message using options in -// UnmarshalOptions object. +// Unmarshal reads the given []byte and populates the given proto.Message +// using options in the UnmarshalOptions object. +// The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { return o.unmarshal(b, m) } @@ -158,21 +159,11 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { switch tok.NameKind() { case text.IdentName: name = pref.Name(tok.IdentName()) - fd = fieldDescs.ByName(name) - if fd == nil { - // The proto name of a group field is in all lowercase, - // while the textproto field name is the group message name. - gd := fieldDescs.ByName(pref.Name(strings.ToLower(string(name)))) - if gd != nil && gd.Kind() == pref.GroupKind && gd.Message().Name() == name { - fd = gd - } - } else if fd.Kind() == pref.GroupKind && fd.Message().Name() != name { - fd = nil // reset since field name is actually the message name - } + fd = fieldDescs.ByTextName(string(name)) case text.TypeName: // Handle extensions only. This code path is not for Any. 
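The updated prototext.Unmarshal documentation above notes that the destination message must be mutable, i.e. a non-nil pointer to a concrete message. A minimal sketch of that contract, assuming the public prototext and durationpb packages:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// The destination must be a non-nil pointer to a concrete message.
	d := &durationpb.Duration{}
	if err := prototext.Unmarshal([]byte(`seconds: 3 nanos: 500000000`), d); err != nil {
		panic(err)
	}
	fmt.Println(d.GetSeconds(), d.GetNanos()) // 3 500000000
}
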
- xt, xtErr = d.findExtension(pref.FullName(tok.TypeName())) + xt, xtErr = d.opts.Resolver.FindExtensionByName(pref.FullName(tok.TypeName())) case text.FieldNumber: isFieldNumberName = true @@ -269,15 +260,6 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { return nil } -// findExtension returns protoreflect.ExtensionType from the Resolver if found. -func (d decoder) findExtension(xtName pref.FullName) (pref.ExtensionType, error) { - xt, err := d.opts.Resolver.FindExtensionByName(xtName) - if err == nil { - return xt, nil - } - return messageset.FindMessageSetExtension(d.opts.Resolver, xtName) -} - // unmarshalSingular unmarshals a non-repeated field value specified by the // given FieldDescriptor. func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) error { diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 0877d71c519..8d5304dc5b3 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -6,7 +6,6 @@ package prototext import ( "fmt" - "sort" "strconv" "unicode/utf8" @@ -16,10 +15,11 @@ import ( "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/internal/genid" - "google.golang.org/protobuf/internal/mapsort" + "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" pref "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -169,35 +169,15 @@ func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error { // If unable to expand, continue on to marshal Any as a regular message. } - // Marshal known fields. - fieldDescs := messageDesc.Fields() - size := fieldDescs.Len() - for i := 0; i < size; { - fd := fieldDescs.Get(i) - if od := fd.ContainingOneof(); od != nil { - fd = m.WhichOneof(od) - i += od.Fields().Len() - } else { - i++ - } - - if fd == nil || !m.Has(fd) { - continue - } - - name := fd.Name() - // Use type name for group field name. - if fd.Kind() == pref.GroupKind { - name = fd.Message().Name() - } - val := m.Get(fd) - if err := e.marshalField(string(name), val, fd); err != nil { - return err + // Marshal fields. + var err error + order.RangeFields(m, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if err = e.marshalField(fd.TextName(), v, fd); err != nil { + return false } - } - - // Marshal extensions. - if err := e.marshalExtensions(m); err != nil { + return true + }) + if err != nil { return err } @@ -290,7 +270,7 @@ func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescripto // marshalMap marshals the given protoreflect.Map as multiple name-value fields. func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) error { var err error - mapsort.Range(mmap, fd.MapKey().Kind(), func(key pref.MapKey, val pref.Value) bool { + order.RangeEntries(mmap, order.GenericKeyOrder, func(key pref.MapKey, val pref.Value) bool { e.WriteName(name) e.StartMessage() defer e.EndMessage() @@ -311,48 +291,6 @@ func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) return err } -// marshalExtensions marshals extension fields. 
-func (e encoder) marshalExtensions(m pref.Message) error { - type entry struct { - key string - value pref.Value - desc pref.FieldDescriptor - } - - // Get a sorted list based on field key first. - var entries []entry - m.Range(func(fd pref.FieldDescriptor, v pref.Value) bool { - if !fd.IsExtension() { - return true - } - // For MessageSet extensions, the name used is the parent message. - name := fd.FullName() - if messageset.IsMessageSetExtension(fd) { - name = name.Parent() - } - entries = append(entries, entry{ - key: string(name), - value: v, - desc: fd, - }) - return true - }) - // Sort extensions lexicographically. - sort.Slice(entries, func(i, j int) bool { - return entries[i].key < entries[j].key - }) - - // Write out sorted list. - for _, entry := range entries { - // Extension field name is the proto field name enclosed in []. - name := "[" + entry.key + "]" - if err := e.marshalField(name, entry.value, entry.desc); err != nil { - return err - } - } - return nil -} - // marshalUnknown parses the given []byte and marshals fields out. // This function assumes proper encoding in the given []byte. func (e encoder) marshalUnknown(b []byte) { diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go index e7af0fe0de2..360c63329d4 100644 --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -42,6 +42,8 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { name = "FileImports" case pref.Descriptor: name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s" + default: + name = reflect.ValueOf(vs).Elem().Type().Name() } start, end = name+"{", "}" } diff --git a/vendor/google.golang.org/protobuf/internal/detrand/rand.go b/vendor/google.golang.org/protobuf/internal/detrand/rand.go index a904dd1f91a..49c8676d484 100644 --- a/vendor/google.golang.org/protobuf/internal/detrand/rand.go +++ b/vendor/google.golang.org/protobuf/internal/detrand/rand.go @@ -26,6 +26,14 @@ func Bool() bool { return randSeed%2 == 1 } +// Intn returns a deterministically random integer between 0 and n-1, inclusive. +func Intn(n int) int { + if n <= 0 { + panic("must be positive") + } + return int(randSeed % uint64(n)) +} + // randSeed is a best-effort at an approximate hash of the Go binary. var randSeed = binaryHash() diff --git a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go index b1eeea50797..c1866f3c1a7 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go @@ -11,10 +11,9 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" ) -// The MessageSet wire format is equivalent to a message defiend as follows, +// The MessageSet wire format is equivalent to a message defined as follows, // where each Item defines an extension field with a field number of 'type_id' // and content of 'message'. MessageSet extensions must be non-repeated message // fields. @@ -48,33 +47,17 @@ func IsMessageSet(md pref.MessageDescriptor) bool { return ok && xmd.IsMessageSet() } -// IsMessageSetExtension reports this field extends a MessageSet. 
+// IsMessageSetExtension reports this field properly extends a MessageSet. func IsMessageSetExtension(fd pref.FieldDescriptor) bool { - if fd.Name() != ExtensionName { + switch { + case fd.Name() != ExtensionName: return false - } - if fd.FullName().Parent() != fd.Message().FullName() { + case !IsMessageSet(fd.ContainingMessage()): + return false + case fd.FullName().Parent() != fd.Message().FullName(): return false } - return IsMessageSet(fd.ContainingMessage()) -} - -// FindMessageSetExtension locates a MessageSet extension field by name. -// In text and JSON formats, the extension name used is the message itself. -// The extension field name is derived by appending ExtensionName. -func FindMessageSetExtension(r preg.ExtensionTypeResolver, s pref.FullName) (pref.ExtensionType, error) { - name := s.Append(ExtensionName) - xt, err := r.FindExtensionByName(name) - if err != nil { - if err == preg.NotFound { - return nil, err - } - return nil, errors.Wrap(err, "%q", name) - } - if !IsMessageSetExtension(xt.TypeDescriptor()) { - return nil, preg.NotFound - } - return xt, nil + return true } // SizeField returns the size of a MessageSet item field containing an extension diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go index 16c02d7b627..38f1931c6fd 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -104,7 +104,7 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p case strings.HasPrefix(s, "json="): jsonName := s[len("json="):] if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) { - f.L1.JSONName.Init(jsonName) + f.L1.StringName.InitJSON(jsonName) } case s == "packed": f.L1.HasPacked = true diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go index c4ba1c598fb..aa66bdd06a3 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go @@ -32,7 +32,6 @@ type Encoder struct { encoderState indent string - newline string // set to "\n" if len(indent) > 0 delims [2]byte outputASCII bool } @@ -61,7 +60,6 @@ func NewEncoder(indent string, delims [2]byte, outputASCII bool) (*Encoder, erro return nil, errors.New("indent may only be composed of space and tab characters") } e.indent = indent - e.newline = "\n" } switch delims { case [2]byte{0, 0}: @@ -126,7 +124,7 @@ func appendString(out []byte, in string, outputASCII bool) []byte { // are used to represent both the proto string and bytes type. r = rune(in[0]) fallthrough - case r < ' ' || r == '"' || r == '\\': + case r < ' ' || r == '"' || r == '\\' || r == 0x7f: out = append(out, '\\') switch r { case '"', '\\': @@ -143,7 +141,7 @@ func appendString(out []byte, in string, outputASCII bool) []byte { out = strconv.AppendUint(out, uint64(r), 16) } in = in[n:] - case outputASCII && r >= utf8.RuneSelf: + case r >= utf8.RuneSelf && (outputASCII || r <= 0x009f): out = append(out, '\\') if r <= math.MaxUint16 { out = append(out, 'u') @@ -168,7 +166,7 @@ func appendString(out []byte, in string, outputASCII bool) []byte { // escaping. If no characters need escaping, this returns the input length. 
func indexNeedEscapeInString(s string) int { for i := 0; i < len(s); i++ { - if c := s[i]; c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= utf8.RuneSelf { + if c := s[i]; c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= 0x7f { return i } } diff --git a/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go b/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go deleted file mode 100644 index 517c4e2a041..00000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package fieldsort defines an ordering of fields. -// -// The ordering defined by this package matches the historic behavior of the proto -// package, placing extensions first and oneofs last. -// -// There is no guarantee about stability of the wire encoding, and users should not -// depend on the order defined in this package as it is subject to change without -// notice. -package fieldsort - -import ( - "google.golang.org/protobuf/reflect/protoreflect" -) - -// Less returns true if field a comes before field j in ordered wire marshal output. -func Less(a, b protoreflect.FieldDescriptor) bool { - ea := a.IsExtension() - eb := b.IsExtension() - oa := a.ContainingOneof() - ob := b.ContainingOneof() - switch { - case ea != eb: - return ea - case oa != nil && ob != nil: - if oa == ob { - return a.Number() < b.Number() - } - return oa.Index() < ob.Index() - case oa != nil && !oa.IsSynthetic(): - return false - case ob != nil && !ob.IsSynthetic(): - return true - default: - return a.Number() < b.Number() - } -} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/vendor/google.golang.org/protobuf/internal/filedesc/build.go index d02d770c984..b293b694736 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/build.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/build.go @@ -3,6 +3,9 @@ // license that can be found in the LICENSE file. // Package filedesc provides functionality for constructing descriptors. +// +// The types in this package implement interfaces in the protoreflect package +// related to protobuf descripriptors. package filedesc import ( diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 9385126fba6..98ab142aeee 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -13,6 +13,7 @@ import ( "google.golang.org/protobuf/internal/descfmt" "google.golang.org/protobuf/internal/descopts" "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" @@ -99,15 +100,6 @@ func (fd *File) lazyInitOnce() { fd.mu.Unlock() } -// ProtoLegacyRawDesc is a pseudo-internal API for allowing the v1 code -// to be able to retrieve the raw descriptor. -// -// WARNING: This method is exempt from the compatibility promise and may be -// removed in the future without warning. -func (fd *File) ProtoLegacyRawDesc() []byte { - return fd.builder.RawDescriptor -} - // GoPackagePath is a pseudo-internal API for determining the Go package path // that this file descriptor is declared in. 
// @@ -207,7 +199,7 @@ type ( Number pref.FieldNumber Cardinality pref.Cardinality // must be consistent with Message.RequiredNumbers Kind pref.Kind - JSONName jsonName + StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions HasPacked bool // promoted from google.protobuf.FieldOptions @@ -277,8 +269,9 @@ func (fd *Field) Options() pref.ProtoMessage { func (fd *Field) Number() pref.FieldNumber { return fd.L1.Number } func (fd *Field) Cardinality() pref.Cardinality { return fd.L1.Cardinality } func (fd *Field) Kind() pref.Kind { return fd.L1.Kind } -func (fd *Field) HasJSONName() bool { return fd.L1.JSONName.has } -func (fd *Field) JSONName() string { return fd.L1.JSONName.get(fd) } +func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } +func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } +func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } func (fd *Field) HasPresence() bool { return fd.L1.Cardinality != pref.Repeated && (fd.L0.ParentFile.L1.Syntax == pref.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) } @@ -373,7 +366,7 @@ type ( } ExtensionL2 struct { Options func() pref.ProtoMessage - JSONName jsonName + StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsPacked bool // promoted from google.protobuf.FieldOptions Default defaultValue @@ -391,8 +384,9 @@ func (xd *Extension) Options() pref.ProtoMessage { func (xd *Extension) Number() pref.FieldNumber { return xd.L1.Number } func (xd *Extension) Cardinality() pref.Cardinality { return xd.L1.Cardinality } func (xd *Extension) Kind() pref.Kind { return xd.L1.Kind } -func (xd *Extension) HasJSONName() bool { return xd.lazyInit().JSONName.has } -func (xd *Extension) JSONName() string { return xd.lazyInit().JSONName.get(xd) } +func (xd *Extension) HasJSONName() bool { return xd.lazyInit().StringName.hasJSON } +func (xd *Extension) JSONName() string { return xd.lazyInit().StringName.getJSON(xd) } +func (xd *Extension) TextName() string { return xd.lazyInit().StringName.getText(xd) } func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != pref.Repeated } func (xd *Extension) HasOptionalKeyword() bool { return (xd.L0.ParentFile.L1.Syntax == pref.Proto2 && xd.L1.Cardinality == pref.Optional) || xd.lazyInit().IsProto3Optional @@ -506,27 +500,50 @@ func (d *Base) Syntax() pref.Syntax { return d.L0.ParentFile.Syn func (d *Base) IsPlaceholder() bool { return false } func (d *Base) ProtoInternal(pragma.DoNotImplement) {} -type jsonName struct { - has bool - once sync.Once - name string +type stringName struct { + hasJSON bool + once sync.Once + nameJSON string + nameText string } -// Init initializes the name. It is exported for use by other internal packages. -func (js *jsonName) Init(s string) { - js.has = true - js.name = s +// InitJSON initializes the name. It is exported for use by other internal packages. +func (s *stringName) InitJSON(name string) { + s.hasJSON = true + s.nameJSON = name } -func (js *jsonName) get(fd pref.FieldDescriptor) string { - if !js.has { - js.once.Do(func() { - js.name = strs.JSONCamelCase(string(fd.Name())) - }) - } - return js.name +func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName { + s.once.Do(func() { + if fd.IsExtension() { + // For extensions, JSON and text are formatted the same way. 
+ var name string + if messageset.IsMessageSetExtension(fd) { + name = string("[" + fd.FullName().Parent() + "]") + } else { + name = string("[" + fd.FullName() + "]") + } + s.nameJSON = name + s.nameText = name + } else { + // Format the JSON name. + if !s.hasJSON { + s.nameJSON = strs.JSONCamelCase(string(fd.Name())) + } + + // Format the text name. + s.nameText = string(fd.Name()) + if fd.Kind() == pref.GroupKind { + s.nameText = string(fd.Message().Name()) + } + } + }) + return s } +func (s *stringName) getJSON(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameJSON } +func (s *stringName) getText(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameText } + func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue { dv := defaultValue{has: v.IsValid(), val: v, enum: ev} if b, ok := v.Interface().([]byte); ok { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index e672233e77e..198451e3ec9 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -451,7 +451,7 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des case genid.FieldDescriptorProto_Name_field_number: fd.L0.FullName = appendFullName(sb, pd.FullName(), v) case genid.FieldDescriptorProto_JsonName_field_number: - fd.L1.JSONName.Init(sb.MakeString(v)) + fd.L1.StringName.InitJSON(sb.MakeString(v)) case genid.FieldDescriptorProto_DefaultValue_field_number: fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages case genid.FieldDescriptorProto_TypeName_field_number: @@ -551,7 +551,7 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { b = b[m:] switch num { case genid.FieldDescriptorProto_JsonName_field_number: - xd.L2.JSONName.Init(sb.MakeString(v)) + xd.L2.StringName.InitJSON(sb.MakeString(v)) case genid.FieldDescriptorProto_DefaultValue_field_number: xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions case genid.FieldDescriptorProto_TypeName_field_number: diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go index c876cd34d70..aa294fff99a 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go @@ -6,9 +6,12 @@ package filedesc import ( "fmt" + "math" "sort" "sync" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/descfmt" "google.golang.org/protobuf/internal/errors" @@ -245,6 +248,7 @@ type OneofFields struct { once sync.Once byName map[pref.Name]pref.FieldDescriptor // protected by once byJSON map[string]pref.FieldDescriptor // protected by once + byText map[string]pref.FieldDescriptor // protected by once byNum map[pref.FieldNumber]pref.FieldDescriptor // protected by once } @@ -252,6 +256,7 @@ func (p *OneofFields) Len() int { return func (p *OneofFields) Get(i int) pref.FieldDescriptor { return p.List[i] } func (p *OneofFields) ByName(s pref.Name) pref.FieldDescriptor { return p.lazyInit().byName[s] } func (p *OneofFields) ByJSONName(s string) pref.FieldDescriptor { return p.lazyInit().byJSON[s] } +func (p *OneofFields) ByTextName(s string) pref.FieldDescriptor { return p.lazyInit().byText[s] } 
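The byText maps added in these hunks back the new text-format name lookup on field descriptor lists; the same change surfaces through the public protoreflect API as FieldDescriptors.ByTextName and FieldDescriptor.TextName (both are used elsewhere in this diff). A minimal sketch using a well-known message type:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	md := (&durationpb.Duration{}).ProtoReflect().Descriptor()

	// Look a field up by its text-format name: for ordinary fields this is the
	// proto name, for groups the message name, and for extensions "[full.name]".
	fd := md.Fields().ByTextName("seconds")
	fmt.Println(fd.FullName(), fd.TextName(), fd.JSONName())
	// google.protobuf.Duration.seconds seconds seconds
}
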
func (p *OneofFields) ByNumber(n pref.FieldNumber) pref.FieldDescriptor { return p.lazyInit().byNum[n] } func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {} @@ -261,11 +266,13 @@ func (p *OneofFields) lazyInit() *OneofFields { if len(p.List) > 0 { p.byName = make(map[pref.Name]pref.FieldDescriptor, len(p.List)) p.byJSON = make(map[string]pref.FieldDescriptor, len(p.List)) + p.byText = make(map[string]pref.FieldDescriptor, len(p.List)) p.byNum = make(map[pref.FieldNumber]pref.FieldDescriptor, len(p.List)) for _, f := range p.List { // Field names and numbers are guaranteed to be unique. p.byName[f.Name()] = f p.byJSON[f.JSONName()] = f + p.byText[f.TextName()] = f p.byNum[f.Number()] = f } } @@ -274,9 +281,170 @@ func (p *OneofFields) lazyInit() *OneofFields { } type SourceLocations struct { + // List is a list of SourceLocations. + // The SourceLocation.Next field does not need to be populated + // as it will be lazily populated upon first need. List []pref.SourceLocation + + // File is the parent file descriptor that these locations are relative to. + // If non-nil, ByDescriptor verifies that the provided descriptor + // is a child of this file descriptor. + File pref.FileDescriptor + + once sync.Once + byPath map[pathKey]int +} + +func (p *SourceLocations) Len() int { return len(p.List) } +func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.lazyInit().List[i] } +func (p *SourceLocations) byKey(k pathKey) pref.SourceLocation { + if i, ok := p.lazyInit().byPath[k]; ok { + return p.List[i] + } + return pref.SourceLocation{} +} +func (p *SourceLocations) ByPath(path pref.SourcePath) pref.SourceLocation { + return p.byKey(newPathKey(path)) +} +func (p *SourceLocations) ByDescriptor(desc pref.Descriptor) pref.SourceLocation { + if p.File != nil && desc != nil && p.File != desc.ParentFile() { + return pref.SourceLocation{} // mismatching parent files + } + var pathArr [16]int32 + path := pathArr[:0] + for { + switch desc.(type) { + case pref.FileDescriptor: + // Reverse the path since it was constructed in reverse. 
+ for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 { + path[i], path[j] = path[j], path[i] + } + return p.byKey(newPathKey(path)) + case pref.MessageDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_MessageType_field_number)) + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_NestedType_field_number)) + default: + return pref.SourceLocation{} + } + case pref.FieldDescriptor: + isExtension := desc.(pref.FieldDescriptor).IsExtension() + path = append(path, int32(desc.Index())) + desc = desc.Parent() + if isExtension { + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_Extension_field_number)) + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_Extension_field_number)) + default: + return pref.SourceLocation{} + } + } else { + switch desc.(type) { + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_Field_field_number)) + default: + return pref.SourceLocation{} + } + } + case pref.OneofDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_OneofDecl_field_number)) + default: + return pref.SourceLocation{} + } + case pref.EnumDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_EnumType_field_number)) + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_EnumType_field_number)) + default: + return pref.SourceLocation{} + } + case pref.EnumValueDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.EnumDescriptor: + path = append(path, int32(genid.EnumDescriptorProto_Value_field_number)) + default: + return pref.SourceLocation{} + } + case pref.ServiceDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_Service_field_number)) + default: + return pref.SourceLocation{} + } + case pref.MethodDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.ServiceDescriptor: + path = append(path, int32(genid.ServiceDescriptorProto_Method_field_number)) + default: + return pref.SourceLocation{} + } + default: + return pref.SourceLocation{} + } + } } +func (p *SourceLocations) lazyInit() *SourceLocations { + p.once.Do(func() { + if len(p.List) > 0 { + // Collect all the indexes for a given path. + pathIdxs := make(map[pathKey][]int, len(p.List)) + for i, l := range p.List { + k := newPathKey(l.Path) + pathIdxs[k] = append(pathIdxs[k], i) + } -func (p *SourceLocations) Len() int { return len(p.List) } -func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.List[i] } + // Update the next index for all locations. 
+ p.byPath = make(map[pathKey]int, len(p.List)) + for k, idxs := range pathIdxs { + for i := 0; i < len(idxs)-1; i++ { + p.List[idxs[i]].Next = idxs[i+1] + } + p.List[idxs[len(idxs)-1]].Next = 0 + p.byPath[k] = idxs[0] // record the first location for this path + } + } + }) + return p +} func (p *SourceLocations) ProtoInternal(pragma.DoNotImplement) {} + +// pathKey is a comparable representation of protoreflect.SourcePath. +type pathKey struct { + arr [16]uint8 // first n-1 path segments; last element is the length + str string // used if the path does not fit in arr +} + +func newPathKey(p pref.SourcePath) (k pathKey) { + if len(p) < len(k.arr) { + for i, ps := range p { + if ps < 0 || math.MaxUint8 <= ps { + return pathKey{str: p.String()} + } + k.arr[i] = uint8(ps) + } + k.arr[len(k.arr)-1] = uint8(len(p)) + return k + } + return pathKey{str: p.String()} +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go index 6a8825e8027..30db19fdc75 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go @@ -142,6 +142,7 @@ type Fields struct { once sync.Once byName map[protoreflect.Name]*Field // protected by once byJSON map[string]*Field // protected by once + byText map[string]*Field // protected by once byNum map[protoreflect.FieldNumber]*Field // protected by once } @@ -163,6 +164,12 @@ func (p *Fields) ByJSONName(s string) protoreflect.FieldDescriptor { } return nil } +func (p *Fields) ByTextName(s string) protoreflect.FieldDescriptor { + if d := p.lazyInit().byText[s]; d != nil { + return d + } + return nil +} func (p *Fields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor { if d := p.lazyInit().byNum[n]; d != nil { return d @@ -178,6 +185,7 @@ func (p *Fields) lazyInit() *Fields { if len(p.List) > 0 { p.byName = make(map[protoreflect.Name]*Field, len(p.List)) p.byJSON = make(map[string]*Field, len(p.List)) + p.byText = make(map[string]*Field, len(p.List)) p.byNum = make(map[protoreflect.FieldNumber]*Field, len(p.List)) for i := range p.List { d := &p.List[i] @@ -187,6 +195,9 @@ func (p *Fields) lazyInit() *Fields { if _, ok := p.byJSON[d.JSONName()]; !ok { p.byJSON[d.JSONName()] = d } + if _, ok := p.byText[d.TextName()]; !ok { + p.byText[d.TextName()] = d + } if _, ok := p.byNum[d.Number()]; !ok { p.byNum[d.Number()] = d } diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go index b5974528db6..abee5f30e9f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -167,7 +167,7 @@ func (Export) MessageTypeOf(m message) pref.MessageType { if mv := (Export{}).protoMessageV2Of(m); mv != nil { return mv.ProtoReflect().Type() } - return legacyLoadMessageInfo(reflect.TypeOf(m), "") + return legacyLoadMessageType(reflect.TypeOf(m), "") } // MessageStringOf returns the message value as a string, diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index c00744d385c..cb4b482d166 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -10,6 +10,7 @@ import ( "sync" "google.golang.org/protobuf/encoding/protowire" + 
"google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" pref "google.golang.org/protobuf/reflect/protoreflect" preg "google.golang.org/protobuf/reflect/protoregistry" @@ -20,6 +21,7 @@ type errInvalidUTF8 struct{} func (errInvalidUTF8) Error() string { return "string field contains invalid UTF-8" } func (errInvalidUTF8) InvalidUTF8() bool { return true } +func (errInvalidUTF8) Unwrap() error { return errors.Error } // initOneofFieldCoders initializes the fast-path functions for the fields in a oneof. // @@ -242,7 +244,7 @@ func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if p.Elem().IsNil() { p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) @@ -276,7 +278,7 @@ func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarsh } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ Buf: v, @@ -420,7 +422,7 @@ func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowir } b, n := protowire.ConsumeGroup(num, b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ Buf: b, @@ -494,7 +496,7 @@ func consumeMessageSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderF } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } m := reflect.New(f.mi.GoReflectType.Elem()).Interface() mp := pointerOfIface(m) @@ -550,7 +552,7 @@ func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowir } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } mp := reflect.New(goType.Elem()) o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ @@ -613,7 +615,7 @@ func consumeMessageSliceValue(b []byte, listv pref.Value, _ protowire.Number, wt } v, n := protowire.ConsumeBytes(b) if n < 0 { - return pref.Value{}, out, protowire.ParseError(n) + return pref.Value{}, out, errDecode } m := list.NewElement() o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ @@ -681,7 +683,7 @@ func consumeGroupSliceValue(b []byte, listv pref.Value, num protowire.Number, wt } b, n := protowire.ConsumeGroup(num, b) if n < 0 { - return pref.Value{}, out, protowire.ParseError(n) + return pref.Value{}, out, errDecode } m := list.NewElement() o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ @@ -767,7 +769,7 @@ func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire } b, n := protowire.ConsumeGroup(num, b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } mp := reflect.New(goType.Elem()) o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go index ff198d0a153..1a509b63ebc 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go @@ -15,13 +15,13 @@ import ( ) // sizeBool returns the size of wire encoding a bool pointer as a Bool. 
-func sizeBool(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBool(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Bool() return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) } // appendBool wire encodes a bool pointer as a Bool. -func appendBool(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBool(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bool() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeBool(v)) @@ -29,7 +29,7 @@ func appendBool(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byt } // consumeBool wire decodes a bool pointer as a Bool. -func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -45,7 +45,7 @@ func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Bool() = protowire.DecodeBool(v) out.n = n @@ -61,7 +61,7 @@ var coderBool = pointerCoderFuncs{ // sizeBoolNoZero returns the size of wire encoding a bool pointer as a Bool. // The zero value is not encoded. -func sizeBoolNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBoolNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Bool() if v == false { return 0 @@ -71,7 +71,7 @@ func sizeBoolNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { // appendBoolNoZero wire encodes a bool pointer as a Bool. // The zero value is not encoded. -func appendBoolNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBoolNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bool() if v == false { return b, nil @@ -90,14 +90,14 @@ var coderBoolNoZero = pointerCoderFuncs{ // sizeBoolPtr returns the size of wire encoding a *bool pointer as a Bool. // It panics if the pointer is nil. -func sizeBoolPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBoolPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.BoolPtr() return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) } // appendBoolPtr wire encodes a *bool pointer as a Bool. // It panics if the pointer is nil. -func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.BoolPtr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeBool(v)) @@ -105,7 +105,7 @@ func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([] } // consumeBoolPtr wire decodes a *bool pointer as a Bool. 
-func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -121,7 +121,7 @@ func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.BoolPtr() if *vp == nil { @@ -140,7 +140,7 @@ var coderBoolPtr = pointerCoderFuncs{ } // sizeBoolSlice returns the size of wire encoding a []bool pointer as a repeated Bool. -func sizeBoolSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBoolSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.BoolSlice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) @@ -149,7 +149,7 @@ func sizeBoolSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { } // appendBoolSlice encodes a []bool pointer as a repeated Bool. -func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.BoolSlice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -159,13 +159,13 @@ func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeBoolSlice wire decodes a []bool pointer as a repeated Bool. -func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.BoolSlice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -180,7 +180,7 @@ func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, protowire.DecodeBool(v)) b = b[n:] @@ -204,7 +204,7 @@ func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, protowire.DecodeBool(v)) out.n = n @@ -219,7 +219,7 @@ var coderBoolSlice = pointerCoderFuncs{ } // sizeBoolPackedSlice returns the size of wire encoding a []bool pointer as a packed repeated Bool. -func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.BoolSlice() if len(s) == 0 { return 0 @@ -232,7 +232,7 @@ func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size i } // appendBoolPackedSlice encodes a []bool pointer as a packed repeated Bool. 
-func appendBoolPackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBoolPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.BoolSlice() if len(s) == 0 { return b, nil @@ -257,19 +257,19 @@ var coderBoolPackedSlice = pointerCoderFuncs{ } // sizeBoolValue returns the size of wire encoding a bool value as a Bool. -func sizeBoolValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeBoolValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool())) } // appendBoolValue encodes a bool value as a Bool. -func appendBoolValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendBoolValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) return b, nil } // consumeBoolValue decodes a bool value as a Bool. -func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -285,7 +285,7 @@ func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp p v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfBool(protowire.DecodeBool(v)), out, nil @@ -299,7 +299,7 @@ var coderBoolValue = valueCoderFuncs{ } // sizeBoolSliceValue returns the size of wire encoding a []bool value as a repeated Bool. -func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -309,7 +309,7 @@ func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) } // appendBoolSliceValue encodes a []bool value as a repeated Bool. -func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -320,12 +320,12 @@ func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeBoolSliceValue wire decodes a []bool value as a repeated Bool. 
-func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -340,7 +340,7 @@ func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numbe v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) b = b[n:] @@ -363,7 +363,7 @@ func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numbe v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) out.n = n @@ -378,7 +378,7 @@ var coderBoolSliceValue = valueCoderFuncs{ } // sizeBoolPackedSliceValue returns the size of wire encoding a []bool value as a packed repeated Bool. -func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -393,7 +393,7 @@ func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOp } // appendBoolPackedSliceValue encodes a []bool value as a packed repeated Bool. -func appendBoolPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendBoolPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -421,19 +421,19 @@ var coderBoolPackedSliceValue = valueCoderFuncs{ } // sizeEnumValue returns the size of wire encoding a value as a Enum. -func sizeEnumValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeEnumValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(uint64(v.Enum())) } // appendEnumValue encodes a value as a Enum. -func appendEnumValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendEnumValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, uint64(v.Enum())) return b, nil } // consumeEnumValue decodes a value as a Enum. 
-func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -449,7 +449,7 @@ func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp p v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), out, nil @@ -463,7 +463,7 @@ var coderEnumValue = valueCoderFuncs{ } // sizeEnumSliceValue returns the size of wire encoding a [] value as a repeated Enum. -func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -473,7 +473,7 @@ func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) } // appendEnumSliceValue encodes a [] value as a repeated Enum. -func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -484,12 +484,12 @@ func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeEnumSliceValue wire decodes a [] value as a repeated Enum. -func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -504,7 +504,7 @@ func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numbe v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) b = b[n:] @@ -527,7 +527,7 @@ func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numbe v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) out.n = n @@ -542,7 +542,7 @@ var coderEnumSliceValue = valueCoderFuncs{ } // sizeEnumPackedSliceValue returns the size of wire encoding a [] value as a packed repeated Enum. 
-func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -557,7 +557,7 @@ func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOp } // appendEnumPackedSliceValue encodes a [] value as a packed repeated Enum. -func appendEnumPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendEnumPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -585,13 +585,13 @@ var coderEnumPackedSliceValue = valueCoderFuncs{ } // sizeInt32 returns the size of wire encoding a int32 pointer as a Int32. -func sizeInt32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int32() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendInt32 wire encodes a int32 pointer as a Int32. -func appendInt32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -599,7 +599,7 @@ func appendInt32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]by } // consumeInt32 wire decodes a int32 pointer as a Int32. -func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -615,7 +615,7 @@ func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int32() = int32(v) out.n = n @@ -631,7 +631,7 @@ var coderInt32 = pointerCoderFuncs{ // sizeInt32NoZero returns the size of wire encoding a int32 pointer as a Int32. // The zero value is not encoded. -func sizeInt32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int32() if v == 0 { return 0 @@ -641,7 +641,7 @@ func sizeInt32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendInt32NoZero wire encodes a int32 pointer as a Int32. // The zero value is not encoded. -func appendInt32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() if v == 0 { return b, nil @@ -660,14 +660,14 @@ var coderInt32NoZero = pointerCoderFuncs{ // sizeInt32Ptr returns the size of wire encoding a *int32 pointer as a Int32. // It panics if the pointer is nil. -func sizeInt32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Int32Ptr() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendInt32Ptr wire encodes a *int32 pointer as a Int32. // It panics if the pointer is nil. 
-func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -675,7 +675,7 @@ func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([ } // consumeInt32Ptr wire decodes a *int32 pointer as a Int32. -func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -691,7 +691,7 @@ func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int32Ptr() if *vp == nil { @@ -710,7 +710,7 @@ var coderInt32Ptr = pointerCoderFuncs{ } // sizeInt32Slice returns the size of wire encoding a []int32 pointer as a repeated Int32. -func sizeInt32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(uint64(v)) @@ -719,7 +719,7 @@ func sizeInt32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { } // appendInt32Slice encodes a []int32 pointer as a repeated Int32. -func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -729,13 +729,13 @@ func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeInt32Slice wire decodes a []int32 pointer as a repeated Int32. -func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -750,7 +750,7 @@ func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, int32(v)) b = b[n:] @@ -774,7 +774,7 @@ func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, int32(v)) out.n = n @@ -789,7 +789,7 @@ var coderInt32Slice = pointerCoderFuncs{ } // sizeInt32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Int32. 
-func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() if len(s) == 0 { return 0 @@ -802,7 +802,7 @@ func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendInt32PackedSlice encodes a []int32 pointer as a packed repeated Int32. -func appendInt32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() if len(s) == 0 { return b, nil @@ -827,19 +827,19 @@ var coderInt32PackedSlice = pointerCoderFuncs{ } // sizeInt32Value returns the size of wire encoding a int32 value as a Int32. -func sizeInt32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeInt32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(uint64(int32(v.Int()))) } // appendInt32Value encodes a int32 value as a Int32. -func appendInt32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, uint64(int32(v.Int()))) return b, nil } // consumeInt32Value decodes a int32 value as a Int32. -func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -855,7 +855,7 @@ func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt32(int32(v)), out, nil @@ -869,7 +869,7 @@ var coderInt32Value = valueCoderFuncs{ } // sizeInt32SliceValue returns the size of wire encoding a []int32 value as a repeated Int32. -func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -879,7 +879,7 @@ func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions } // appendInt32SliceValue encodes a []int32 value as a repeated Int32. -func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -890,12 +890,12 @@ func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeInt32SliceValue wire decodes a []int32 value as a repeated Int32. 
-func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -910,7 +910,7 @@ func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) b = b[n:] @@ -933,7 +933,7 @@ func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) out.n = n @@ -948,7 +948,7 @@ var coderInt32SliceValue = valueCoderFuncs{ } // sizeInt32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Int32. -func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -963,7 +963,7 @@ func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalO } // appendInt32PackedSliceValue encodes a []int32 value as a packed repeated Int32. -func appendInt32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -991,13 +991,13 @@ var coderInt32PackedSliceValue = valueCoderFuncs{ } // sizeSint32 returns the size of wire encoding a int32 pointer as a Sint32. -func sizeSint32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int32() return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) } // appendSint32 wire encodes a int32 pointer as a Sint32. -func appendSint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) @@ -1005,7 +1005,7 @@ func appendSint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeSint32 wire decodes a int32 pointer as a Sint32. 
-func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1021,7 +1021,7 @@ func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int32() = int32(protowire.DecodeZigZag(v & math.MaxUint32)) out.n = n @@ -1037,7 +1037,7 @@ var coderSint32 = pointerCoderFuncs{ // sizeSint32NoZero returns the size of wire encoding a int32 pointer as a Sint32. // The zero value is not encoded. -func sizeSint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int32() if v == 0 { return 0 @@ -1047,7 +1047,7 @@ func sizeSint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendSint32NoZero wire encodes a int32 pointer as a Sint32. // The zero value is not encoded. -func appendSint32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() if v == 0 { return b, nil @@ -1066,14 +1066,14 @@ var coderSint32NoZero = pointerCoderFuncs{ // sizeSint32Ptr returns the size of wire encoding a *int32 pointer as a Sint32. // It panics if the pointer is nil. -func sizeSint32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Int32Ptr() return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) } // appendSint32Ptr wire encodes a *int32 pointer as a Sint32. // It panics if the pointer is nil. -func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) @@ -1081,7 +1081,7 @@ func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeSint32Ptr wire decodes a *int32 pointer as a Sint32. -func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1097,7 +1097,7 @@ func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int32Ptr() if *vp == nil { @@ -1116,7 +1116,7 @@ var coderSint32Ptr = pointerCoderFuncs{ } // sizeSint32Slice returns the size of wire encoding a []int32 pointer as a repeated Sint32. 
-func sizeSint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) @@ -1125,7 +1125,7 @@ func sizeSint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) } // appendSint32Slice encodes a []int32 pointer as a repeated Sint32. -func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -1135,13 +1135,13 @@ func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeSint32Slice wire decodes a []int32 pointer as a repeated Sint32. -func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -1156,7 +1156,7 @@ func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, int32(protowire.DecodeZigZag(v&math.MaxUint32))) b = b[n:] @@ -1180,7 +1180,7 @@ func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, int32(protowire.DecodeZigZag(v&math.MaxUint32))) out.n = n @@ -1195,7 +1195,7 @@ var coderSint32Slice = pointerCoderFuncs{ } // sizeSint32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sint32. -func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() if len(s) == 0 { return 0 @@ -1208,7 +1208,7 @@ func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendSint32PackedSlice encodes a []int32 pointer as a packed repeated Sint32. -func appendSint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() if len(s) == 0 { return b, nil @@ -1233,19 +1233,19 @@ var coderSint32PackedSlice = pointerCoderFuncs{ } // sizeSint32Value returns the size of wire encoding a int32 value as a Sint32. -func sizeSint32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeSint32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) } // appendSint32Value encodes a int32 value as a Sint32. 
-func appendSint32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) return b, nil } // consumeSint32Value decodes a int32 value as a Sint32. -func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -1261,7 +1261,7 @@ func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), out, nil @@ -1275,7 +1275,7 @@ var coderSint32Value = valueCoderFuncs{ } // sizeSint32SliceValue returns the size of wire encoding a []int32 value as a repeated Sint32. -func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -1285,7 +1285,7 @@ func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOption } // appendSint32SliceValue encodes a []int32 value as a repeated Sint32. -func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -1296,12 +1296,12 @@ func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeSint32SliceValue wire decodes a []int32 value as a repeated Sint32. 
-func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -1316,7 +1316,7 @@ func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) b = b[n:] @@ -1339,7 +1339,7 @@ func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) out.n = n @@ -1354,7 +1354,7 @@ var coderSint32SliceValue = valueCoderFuncs{ } // sizeSint32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sint32. -func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -1369,7 +1369,7 @@ func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal } // appendSint32PackedSliceValue encodes a []int32 value as a packed repeated Sint32. -func appendSint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -1397,13 +1397,13 @@ var coderSint32PackedSliceValue = valueCoderFuncs{ } // sizeUint32 returns the size of wire encoding a uint32 pointer as a Uint32. -func sizeUint32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint32() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendUint32 wire encodes a uint32 pointer as a Uint32. -func appendUint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -1411,7 +1411,7 @@ func appendUint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeUint32 wire decodes a uint32 pointer as a Uint32. 
-func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1427,7 +1427,7 @@ func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Uint32() = uint32(v) out.n = n @@ -1443,7 +1443,7 @@ var coderUint32 = pointerCoderFuncs{ // sizeUint32NoZero returns the size of wire encoding a uint32 pointer as a Uint32. // The zero value is not encoded. -func sizeUint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint32() if v == 0 { return 0 @@ -1453,7 +1453,7 @@ func sizeUint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendUint32NoZero wire encodes a uint32 pointer as a Uint32. // The zero value is not encoded. -func appendUint32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint32() if v == 0 { return b, nil @@ -1472,14 +1472,14 @@ var coderUint32NoZero = pointerCoderFuncs{ // sizeUint32Ptr returns the size of wire encoding a *uint32 pointer as a Uint32. // It panics if the pointer is nil. -func sizeUint32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Uint32Ptr() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendUint32Ptr wire encodes a *uint32 pointer as a Uint32. // It panics if the pointer is nil. -func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Uint32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -1487,7 +1487,7 @@ func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeUint32Ptr wire decodes a *uint32 pointer as a Uint32. -func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1503,7 +1503,7 @@ func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Uint32Ptr() if *vp == nil { @@ -1522,7 +1522,7 @@ var coderUint32Ptr = pointerCoderFuncs{ } // sizeUint32Slice returns the size of wire encoding a []uint32 pointer as a repeated Uint32. 
-func sizeUint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint32Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(uint64(v)) @@ -1531,7 +1531,7 @@ func sizeUint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) } // appendUint32Slice encodes a []uint32 pointer as a repeated Uint32. -func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -1541,13 +1541,13 @@ func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeUint32Slice wire decodes a []uint32 pointer as a repeated Uint32. -func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -1562,7 +1562,7 @@ func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, uint32(v)) b = b[n:] @@ -1586,7 +1586,7 @@ func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, uint32(v)) out.n = n @@ -1601,7 +1601,7 @@ var coderUint32Slice = pointerCoderFuncs{ } // sizeUint32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Uint32. -func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint32Slice() if len(s) == 0 { return 0 @@ -1614,7 +1614,7 @@ func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendUint32PackedSlice encodes a []uint32 pointer as a packed repeated Uint32. -func appendUint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint32Slice() if len(s) == 0 { return b, nil @@ -1639,19 +1639,19 @@ var coderUint32PackedSlice = pointerCoderFuncs{ } // sizeUint32Value returns the size of wire encoding a uint32 value as a Uint32. -func sizeUint32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeUint32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(uint64(uint32(v.Uint()))) } // appendUint32Value encodes a uint32 value as a Uint32. 
-func appendUint32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) return b, nil } // consumeUint32Value decodes a uint32 value as a Uint32. -func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -1667,7 +1667,7 @@ func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfUint32(uint32(v)), out, nil @@ -1681,7 +1681,7 @@ var coderUint32Value = valueCoderFuncs{ } // sizeUint32SliceValue returns the size of wire encoding a []uint32 value as a repeated Uint32. -func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -1691,7 +1691,7 @@ func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOption } // appendUint32SliceValue encodes a []uint32 value as a repeated Uint32. -func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -1702,12 +1702,12 @@ func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeUint32SliceValue wire decodes a []uint32 value as a repeated Uint32. 
-func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -1722,7 +1722,7 @@ func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) b = b[n:] @@ -1745,7 +1745,7 @@ func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) out.n = n @@ -1760,7 +1760,7 @@ var coderUint32SliceValue = valueCoderFuncs{ } // sizeUint32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Uint32. -func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -1775,7 +1775,7 @@ func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal } // appendUint32PackedSliceValue encodes a []uint32 value as a packed repeated Uint32. -func appendUint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -1803,13 +1803,13 @@ var coderUint32PackedSliceValue = valueCoderFuncs{ } // sizeInt64 returns the size of wire encoding a int64 pointer as a Int64. -func sizeInt64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int64() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendInt64 wire encodes a int64 pointer as a Int64. -func appendInt64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -1817,7 +1817,7 @@ func appendInt64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]by } // consumeInt64 wire decodes a int64 pointer as a Int64. 
-func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1833,7 +1833,7 @@ func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int64() = int64(v) out.n = n @@ -1849,7 +1849,7 @@ var coderInt64 = pointerCoderFuncs{ // sizeInt64NoZero returns the size of wire encoding a int64 pointer as a Int64. // The zero value is not encoded. -func sizeInt64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int64() if v == 0 { return 0 @@ -1859,7 +1859,7 @@ func sizeInt64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendInt64NoZero wire encodes a int64 pointer as a Int64. // The zero value is not encoded. -func appendInt64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() if v == 0 { return b, nil @@ -1878,14 +1878,14 @@ var coderInt64NoZero = pointerCoderFuncs{ // sizeInt64Ptr returns the size of wire encoding a *int64 pointer as a Int64. // It panics if the pointer is nil. -func sizeInt64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Int64Ptr() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendInt64Ptr wire encodes a *int64 pointer as a Int64. // It panics if the pointer is nil. -func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -1893,7 +1893,7 @@ func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([ } // consumeInt64Ptr wire decodes a *int64 pointer as a Int64. -func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1909,7 +1909,7 @@ func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int64Ptr() if *vp == nil { @@ -1928,7 +1928,7 @@ var coderInt64Ptr = pointerCoderFuncs{ } // sizeInt64Slice returns the size of wire encoding a []int64 pointer as a repeated Int64. -func sizeInt64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(uint64(v)) @@ -1937,7 +1937,7 @@ func sizeInt64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { } // appendInt64Slice encodes a []int64 pointer as a repeated Int64. 
-func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -1947,13 +1947,13 @@ func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeInt64Slice wire decodes a []int64 pointer as a repeated Int64. -func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -1968,7 +1968,7 @@ func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, int64(v)) b = b[n:] @@ -1992,7 +1992,7 @@ func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, int64(v)) out.n = n @@ -2007,7 +2007,7 @@ var coderInt64Slice = pointerCoderFuncs{ } // sizeInt64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Int64. -func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() if len(s) == 0 { return 0 @@ -2020,7 +2020,7 @@ func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendInt64PackedSlice encodes a []int64 pointer as a packed repeated Int64. -func appendInt64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() if len(s) == 0 { return b, nil @@ -2045,19 +2045,19 @@ var coderInt64PackedSlice = pointerCoderFuncs{ } // sizeInt64Value returns the size of wire encoding a int64 value as a Int64. -func sizeInt64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeInt64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(uint64(v.Int())) } // appendInt64Value encodes a int64 value as a Int64. -func appendInt64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, uint64(v.Int())) return b, nil } // consumeInt64Value decodes a int64 value as a Int64. 
-func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -2073,7 +2073,7 @@ func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt64(int64(v)), out, nil @@ -2087,7 +2087,7 @@ var coderInt64Value = valueCoderFuncs{ } // sizeInt64SliceValue returns the size of wire encoding a []int64 value as a repeated Int64. -func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2097,7 +2097,7 @@ func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions } // appendInt64SliceValue encodes a []int64 value as a repeated Int64. -func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2108,12 +2108,12 @@ func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeInt64SliceValue wire decodes a []int64 value as a repeated Int64. -func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -2128,7 +2128,7 @@ func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) b = b[n:] @@ -2151,7 +2151,7 @@ func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) out.n = n @@ -2166,7 +2166,7 @@ var coderInt64SliceValue = valueCoderFuncs{ } // sizeInt64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Int64. 
-func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -2181,7 +2181,7 @@ func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalO } // appendInt64PackedSliceValue encodes a []int64 value as a packed repeated Int64. -func appendInt64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -2209,13 +2209,13 @@ var coderInt64PackedSliceValue = valueCoderFuncs{ } // sizeSint64 returns the size of wire encoding a int64 pointer as a Sint64. -func sizeSint64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int64() return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) } // appendSint64 wire encodes a int64 pointer as a Sint64. -func appendSint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) @@ -2223,7 +2223,7 @@ func appendSint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeSint64 wire decodes a int64 pointer as a Sint64. -func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -2239,7 +2239,7 @@ func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int64() = protowire.DecodeZigZag(v) out.n = n @@ -2255,7 +2255,7 @@ var coderSint64 = pointerCoderFuncs{ // sizeSint64NoZero returns the size of wire encoding a int64 pointer as a Sint64. // The zero value is not encoded. -func sizeSint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int64() if v == 0 { return 0 @@ -2265,7 +2265,7 @@ func sizeSint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendSint64NoZero wire encodes a int64 pointer as a Sint64. // The zero value is not encoded. -func appendSint64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() if v == 0 { return b, nil @@ -2284,14 +2284,14 @@ var coderSint64NoZero = pointerCoderFuncs{ // sizeSint64Ptr returns the size of wire encoding a *int64 pointer as a Sint64. // It panics if the pointer is nil. 
-func sizeSint64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Int64Ptr() return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) } // appendSint64Ptr wire encodes a *int64 pointer as a Sint64. // It panics if the pointer is nil. -func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) @@ -2299,7 +2299,7 @@ func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeSint64Ptr wire decodes a *int64 pointer as a Sint64. -func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -2315,7 +2315,7 @@ func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int64Ptr() if *vp == nil { @@ -2334,7 +2334,7 @@ var coderSint64Ptr = pointerCoderFuncs{ } // sizeSint64Slice returns the size of wire encoding a []int64 pointer as a repeated Sint64. -func sizeSint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) @@ -2343,7 +2343,7 @@ func sizeSint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) } // appendSint64Slice encodes a []int64 pointer as a repeated Sint64. -func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -2353,13 +2353,13 @@ func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeSint64Slice wire decodes a []int64 pointer as a repeated Sint64. 
-func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -2374,7 +2374,7 @@ func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, protowire.DecodeZigZag(v)) b = b[n:] @@ -2398,7 +2398,7 @@ func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, protowire.DecodeZigZag(v)) out.n = n @@ -2413,7 +2413,7 @@ var coderSint64Slice = pointerCoderFuncs{ } // sizeSint64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sint64. -func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() if len(s) == 0 { return 0 @@ -2426,7 +2426,7 @@ func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendSint64PackedSlice encodes a []int64 pointer as a packed repeated Sint64. -func appendSint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() if len(s) == 0 { return b, nil @@ -2451,19 +2451,19 @@ var coderSint64PackedSlice = pointerCoderFuncs{ } // sizeSint64Value returns the size of wire encoding a int64 value as a Sint64. -func sizeSint64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeSint64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) } // appendSint64Value encodes a int64 value as a Sint64. -func appendSint64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) return b, nil } // consumeSint64Value decodes a int64 value as a Sint64. 
-func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -2479,7 +2479,7 @@ func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), out, nil @@ -2493,7 +2493,7 @@ var coderSint64Value = valueCoderFuncs{ } // sizeSint64SliceValue returns the size of wire encoding a []int64 value as a repeated Sint64. -func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2503,7 +2503,7 @@ func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOption } // appendSint64SliceValue encodes a []int64 value as a repeated Sint64. -func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2514,12 +2514,12 @@ func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeSint64SliceValue wire decodes a []int64 value as a repeated Sint64. -func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -2534,7 +2534,7 @@ func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) b = b[n:] @@ -2557,7 +2557,7 @@ func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) out.n = n @@ -2572,7 +2572,7 @@ var coderSint64SliceValue = valueCoderFuncs{ } // sizeSint64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sint64. 
-func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -2587,7 +2587,7 @@ func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal } // appendSint64PackedSliceValue encodes a []int64 value as a packed repeated Sint64. -func appendSint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -2615,13 +2615,13 @@ var coderSint64PackedSliceValue = valueCoderFuncs{ } // sizeUint64 returns the size of wire encoding a uint64 pointer as a Uint64. -func sizeUint64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint64() return f.tagsize + protowire.SizeVarint(v) } // appendUint64 wire encodes a uint64 pointer as a Uint64. -func appendUint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, v) @@ -2629,7 +2629,7 @@ func appendUint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeUint64 wire decodes a uint64 pointer as a Uint64. -func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -2645,7 +2645,7 @@ func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Uint64() = v out.n = n @@ -2661,7 +2661,7 @@ var coderUint64 = pointerCoderFuncs{ // sizeUint64NoZero returns the size of wire encoding a uint64 pointer as a Uint64. // The zero value is not encoded. -func sizeUint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint64() if v == 0 { return 0 @@ -2671,7 +2671,7 @@ func sizeUint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendUint64NoZero wire encodes a uint64 pointer as a Uint64. // The zero value is not encoded. -func appendUint64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint64() if v == 0 { return b, nil @@ -2690,14 +2690,14 @@ var coderUint64NoZero = pointerCoderFuncs{ // sizeUint64Ptr returns the size of wire encoding a *uint64 pointer as a Uint64. // It panics if the pointer is nil. -func sizeUint64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Uint64Ptr() return f.tagsize + protowire.SizeVarint(v) } // appendUint64Ptr wire encodes a *uint64 pointer as a Uint64. 
// It panics if the pointer is nil. -func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Uint64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, v) @@ -2705,7 +2705,7 @@ func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeUint64Ptr wire decodes a *uint64 pointer as a Uint64. -func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -2721,7 +2721,7 @@ func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Uint64Ptr() if *vp == nil { @@ -2740,7 +2740,7 @@ var coderUint64Ptr = pointerCoderFuncs{ } // sizeUint64Slice returns the size of wire encoding a []uint64 pointer as a repeated Uint64. -func sizeUint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint64Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(v) @@ -2749,7 +2749,7 @@ func sizeUint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) } // appendUint64Slice encodes a []uint64 pointer as a repeated Uint64. -func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -2759,13 +2759,13 @@ func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeUint64Slice wire decodes a []uint64 pointer as a repeated Uint64. -func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -2780,7 +2780,7 @@ func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, v) b = b[n:] @@ -2804,7 +2804,7 @@ func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, v) out.n = n @@ -2819,7 +2819,7 @@ var coderUint64Slice = pointerCoderFuncs{ } // sizeUint64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Uint64. 
-func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint64Slice() if len(s) == 0 { return 0 @@ -2832,7 +2832,7 @@ func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendUint64PackedSlice encodes a []uint64 pointer as a packed repeated Uint64. -func appendUint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint64Slice() if len(s) == 0 { return b, nil @@ -2857,19 +2857,19 @@ var coderUint64PackedSlice = pointerCoderFuncs{ } // sizeUint64Value returns the size of wire encoding a uint64 value as a Uint64. -func sizeUint64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeUint64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(v.Uint()) } // appendUint64Value encodes a uint64 value as a Uint64. -func appendUint64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, v.Uint()) return b, nil } // consumeUint64Value decodes a uint64 value as a Uint64. -func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -2885,7 +2885,7 @@ func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfUint64(v), out, nil @@ -2899,7 +2899,7 @@ var coderUint64Value = valueCoderFuncs{ } // sizeUint64SliceValue returns the size of wire encoding a []uint64 value as a repeated Uint64. -func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2909,7 +2909,7 @@ func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOption } // appendUint64SliceValue encodes a []uint64 value as a repeated Uint64. -func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2920,12 +2920,12 @@ func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeUint64SliceValue wire decodes a []uint64 value as a repeated Uint64. 
-func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -2940,7 +2940,7 @@ func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint64(v)) b = b[n:] @@ -2963,7 +2963,7 @@ func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint64(v)) out.n = n @@ -2978,7 +2978,7 @@ var coderUint64SliceValue = valueCoderFuncs{ } // sizeUint64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Uint64. -func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -2993,7 +2993,7 @@ func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal } // appendUint64PackedSliceValue encodes a []uint64 value as a packed repeated Uint64. -func appendUint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3021,13 +3021,13 @@ var coderUint64PackedSliceValue = valueCoderFuncs{ } // sizeSfixed32 returns the size of wire encoding a int32 pointer as a Sfixed32. -func sizeSfixed32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendSfixed32 wire encodes a int32 pointer as a Sfixed32. -func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, uint32(v)) @@ -3035,13 +3035,13 @@ func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([ } // consumeSfixed32 wire decodes a int32 pointer as a Sfixed32. 
-func consumeSfixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int32() = int32(v) out.n = n @@ -3057,7 +3057,7 @@ var coderSfixed32 = pointerCoderFuncs{ // sizeSfixed32NoZero returns the size of wire encoding a int32 pointer as a Sfixed32. // The zero value is not encoded. -func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int32() if v == 0 { return 0 @@ -3067,7 +3067,7 @@ func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size in // appendSfixed32NoZero wire encodes a int32 pointer as a Sfixed32. // The zero value is not encoded. -func appendSfixed32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() if v == 0 { return b, nil @@ -3086,13 +3086,13 @@ var coderSfixed32NoZero = pointerCoderFuncs{ // sizeSfixed32Ptr returns the size of wire encoding a *int32 pointer as a Sfixed32. // It panics if the pointer is nil. -func sizeSfixed32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendSfixed32Ptr wire encodes a *int32 pointer as a Sfixed32. // It panics if the pointer is nil. -func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, uint32(v)) @@ -3100,13 +3100,13 @@ func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeSfixed32Ptr wire decodes a *int32 pointer as a Sfixed32. -func consumeSfixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int32Ptr() if *vp == nil { @@ -3125,14 +3125,14 @@ var coderSfixed32Ptr = pointerCoderFuncs{ } // sizeSfixed32Slice returns the size of wire encoding a []int32 pointer as a repeated Sfixed32. -func sizeSfixed32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() size = len(s) * (f.tagsize + protowire.SizeFixed32()) return size } // appendSfixed32Slice encodes a []int32 pointer as a repeated Sfixed32. 
-func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -3142,18 +3142,18 @@ func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOption } // consumeSfixed32Slice wire decodes a []int32 pointer as a repeated Sfixed32. -func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, int32(v)) b = b[n:] @@ -3167,7 +3167,7 @@ func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFiel } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, int32(v)) out.n = n @@ -3182,7 +3182,7 @@ var coderSfixed32Slice = pointerCoderFuncs{ } // sizeSfixed32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sfixed32. -func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() if len(s) == 0 { return 0 @@ -3192,7 +3192,7 @@ func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (si } // appendSfixed32PackedSlice encodes a []int32 pointer as a packed repeated Sfixed32. -func appendSfixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() if len(s) == 0 { return b, nil @@ -3214,25 +3214,25 @@ var coderSfixed32PackedSlice = pointerCoderFuncs{ } // sizeSfixed32Value returns the size of wire encoding a int32 value as a Sfixed32. -func sizeSfixed32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeSfixed32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed32() } // appendSfixed32Value encodes a int32 value as a Sfixed32. -func appendSfixed32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed32(b, uint32(v.Int())) return b, nil } // consumeSfixed32Value decodes a int32 value as a Sfixed32. 
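Editor's note (not part of the vendored diff): the consume*Slice functions above accept two encodings for a repeated field, a single BytesType record carrying a packed run of elements, or one record per element in the scalar wire type. The sketch below mirrors the two branches of consumeSfixed32Slice with a hypothetical helper, consumeRepeatedSfixed32, written against the public protowire API only.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// consumeRepeatedSfixed32 is a hypothetical helper mirroring consumeSfixed32Slice:
// a BytesType record carries a packed run of fixed32s, while a Fixed32Type record
// carries exactly one element.
func consumeRepeatedSfixed32(b []byte, wtyp protowire.Type, s []int32) ([]int32, error) {
	if wtyp == protowire.BytesType {
		payload, n := protowire.ConsumeBytes(b)
		if n < 0 {
			return s, protowire.ParseError(n) // upstream now returns its errDecode sentinel here
		}
		for len(payload) > 0 {
			v, n := protowire.ConsumeFixed32(payload)
			if n < 0 {
				return s, protowire.ParseError(n)
			}
			s = append(s, int32(v))
			payload = payload[n:]
		}
		return s, nil
	}
	if wtyp != protowire.Fixed32Type {
		return s, fmt.Errorf("unexpected wire type %d", wtyp)
	}
	v, n := protowire.ConsumeFixed32(b)
	if n < 0 {
		return s, protowire.ParseError(n)
	}
	return append(s, int32(v)), nil
}

func main() {
	// Packed form: two elements inside one length-delimited payload.
	payload := protowire.AppendFixed32(nil, uint32(int32(-1)))
	payload = protowire.AppendFixed32(payload, 7)
	record := protowire.AppendBytes(nil, payload)

	s, err := consumeRepeatedSfixed32(record, protowire.BytesType, nil)
	fmt.Println(s, err) // [-1 7] <nil>
}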
-func consumeSfixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSfixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt32(int32(v)), out, nil @@ -3246,14 +3246,14 @@ var coderSfixed32Value = valueCoderFuncs{ } // sizeSfixed32SliceValue returns the size of wire encoding a []int32 value as a repeated Sfixed32. -func sizeSfixed32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSfixed32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed32()) return size } // appendSfixed32SliceValue encodes a []int32 value as a repeated Sfixed32. -func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -3264,17 +3264,17 @@ func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64 } // consumeSfixed32SliceValue wire decodes a []int32 value as a repeated Sfixed32. -func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) b = b[n:] @@ -3287,7 +3287,7 @@ func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.N } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) out.n = n @@ -3302,7 +3302,7 @@ var coderSfixed32SliceValue = valueCoderFuncs{ } // sizeSfixed32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sfixed32. -func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3313,7 +3313,7 @@ func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marsh } // appendSfixed32PackedSliceValue encodes a []int32 value as a packed repeated Sfixed32. 
-func appendSfixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3337,13 +3337,13 @@ var coderSfixed32PackedSliceValue = valueCoderFuncs{ } // sizeFixed32 returns the size of wire encoding a uint32 pointer as a Fixed32. -func sizeFixed32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendFixed32 wire encodes a uint32 pointer as a Fixed32. -func appendFixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, v) @@ -3351,13 +3351,13 @@ func appendFixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([] } // consumeFixed32 wire decodes a uint32 pointer as a Fixed32. -func consumeFixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Uint32() = v out.n = n @@ -3373,7 +3373,7 @@ var coderFixed32 = pointerCoderFuncs{ // sizeFixed32NoZero returns the size of wire encoding a uint32 pointer as a Fixed32. // The zero value is not encoded. -func sizeFixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint32() if v == 0 { return 0 @@ -3383,7 +3383,7 @@ func sizeFixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int // appendFixed32NoZero wire encodes a uint32 pointer as a Fixed32. // The zero value is not encoded. -func appendFixed32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint32() if v == 0 { return b, nil @@ -3402,13 +3402,13 @@ var coderFixed32NoZero = pointerCoderFuncs{ // sizeFixed32Ptr returns the size of wire encoding a *uint32 pointer as a Fixed32. // It panics if the pointer is nil. -func sizeFixed32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendFixed32Ptr wire encodes a *uint32 pointer as a Fixed32. // It panics if the pointer is nil. -func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Uint32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, v) @@ -3416,13 +3416,13 @@ func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeFixed32Ptr wire decodes a *uint32 pointer as a Fixed32. 
-func consumeFixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Uint32Ptr() if *vp == nil { @@ -3441,14 +3441,14 @@ var coderFixed32Ptr = pointerCoderFuncs{ } // sizeFixed32Slice returns the size of wire encoding a []uint32 pointer as a repeated Fixed32. -func sizeFixed32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint32Slice() size = len(s) * (f.tagsize + protowire.SizeFixed32()) return size } // appendFixed32Slice encodes a []uint32 pointer as a repeated Fixed32. -func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -3458,18 +3458,18 @@ func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions } // consumeFixed32Slice wire decodes a []uint32 pointer as a repeated Fixed32. -func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, v) b = b[n:] @@ -3483,7 +3483,7 @@ func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderField } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, v) out.n = n @@ -3498,7 +3498,7 @@ var coderFixed32Slice = pointerCoderFuncs{ } // sizeFixed32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Fixed32. -func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint32Slice() if len(s) == 0 { return 0 @@ -3508,7 +3508,7 @@ func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (siz } // appendFixed32PackedSlice encodes a []uint32 pointer as a packed repeated Fixed32. -func appendFixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint32Slice() if len(s) == 0 { return b, nil @@ -3530,25 +3530,25 @@ var coderFixed32PackedSlice = pointerCoderFuncs{ } // sizeFixed32Value returns the size of wire encoding a uint32 value as a Fixed32. 
-func sizeFixed32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeFixed32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed32() } // appendFixed32Value encodes a uint32 value as a Fixed32. -func appendFixed32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed32(b, uint32(v.Uint())) return b, nil } // consumeFixed32Value decodes a uint32 value as a Fixed32. -func consumeFixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfUint32(uint32(v)), out, nil @@ -3562,14 +3562,14 @@ var coderFixed32Value = valueCoderFuncs{ } // sizeFixed32SliceValue returns the size of wire encoding a []uint32 value as a repeated Fixed32. -func sizeFixed32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFixed32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed32()) return size } // appendFixed32SliceValue encodes a []uint32 value as a repeated Fixed32. -func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -3580,17 +3580,17 @@ func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeFixed32SliceValue wire decodes a []uint32 value as a repeated Fixed32. 
-func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) b = b[n:] @@ -3603,7 +3603,7 @@ func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Nu } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) out.n = n @@ -3618,7 +3618,7 @@ var coderFixed32SliceValue = valueCoderFuncs{ } // sizeFixed32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Fixed32. -func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3629,7 +3629,7 @@ func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marsha } // appendFixed32PackedSliceValue encodes a []uint32 value as a packed repeated Fixed32. -func appendFixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3653,13 +3653,13 @@ var coderFixed32PackedSliceValue = valueCoderFuncs{ } // sizeFloat returns the size of wire encoding a float32 pointer as a Float. -func sizeFloat(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFloat(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendFloat wire encodes a float32 pointer as a Float. -func appendFloat(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFloat(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Float32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, math.Float32bits(v)) @@ -3667,13 +3667,13 @@ func appendFloat(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]by } // consumeFloat wire decodes a float32 pointer as a Float. 
-func consumeFloat(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFloat(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Float32() = math.Float32frombits(v) out.n = n @@ -3689,7 +3689,7 @@ var coderFloat = pointerCoderFuncs{ // sizeFloatNoZero returns the size of wire encoding a float32 pointer as a Float. // The zero value is not encoded. -func sizeFloatNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFloatNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Float32() if v == 0 && !math.Signbit(float64(v)) { return 0 @@ -3699,7 +3699,7 @@ func sizeFloatNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendFloatNoZero wire encodes a float32 pointer as a Float. // The zero value is not encoded. -func appendFloatNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFloatNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Float32() if v == 0 && !math.Signbit(float64(v)) { return b, nil @@ -3718,13 +3718,13 @@ var coderFloatNoZero = pointerCoderFuncs{ // sizeFloatPtr returns the size of wire encoding a *float32 pointer as a Float. // It panics if the pointer is nil. -func sizeFloatPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFloatPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendFloatPtr wire encodes a *float32 pointer as a Float. // It panics if the pointer is nil. -func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Float32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, math.Float32bits(v)) @@ -3732,13 +3732,13 @@ func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([ } // consumeFloatPtr wire decodes a *float32 pointer as a Float. -func consumeFloatPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFloatPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Float32Ptr() if *vp == nil { @@ -3757,14 +3757,14 @@ var coderFloatPtr = pointerCoderFuncs{ } // sizeFloatSlice returns the size of wire encoding a []float32 pointer as a repeated Float. -func sizeFloatSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFloatSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Float32Slice() size = len(s) * (f.tagsize + protowire.SizeFixed32()) return size } // appendFloatSlice encodes a []float32 pointer as a repeated Float. 
-func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Float32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -3774,18 +3774,18 @@ func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeFloatSlice wire decodes a []float32 pointer as a repeated Float. -func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, math.Float32frombits(v)) b = b[n:] @@ -3799,7 +3799,7 @@ func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, math.Float32frombits(v)) out.n = n @@ -3814,7 +3814,7 @@ var coderFloatSlice = pointerCoderFuncs{ } // sizeFloatPackedSlice returns the size of wire encoding a []float32 pointer as a packed repeated Float. -func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Float32Slice() if len(s) == 0 { return 0 @@ -3824,7 +3824,7 @@ func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendFloatPackedSlice encodes a []float32 pointer as a packed repeated Float. -func appendFloatPackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFloatPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Float32Slice() if len(s) == 0 { return b, nil @@ -3846,25 +3846,25 @@ var coderFloatPackedSlice = pointerCoderFuncs{ } // sizeFloatValue returns the size of wire encoding a float32 value as a Float. -func sizeFloatValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeFloatValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed32() } // appendFloatValue encodes a float32 value as a Float. -func appendFloatValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFloatValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) return b, nil } // consumeFloatValue decodes a float32 value as a Float. 
-func consumeFloatValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFloatValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), out, nil @@ -3878,14 +3878,14 @@ var coderFloatValue = valueCoderFuncs{ } // sizeFloatSliceValue returns the size of wire encoding a []float32 value as a repeated Float. -func sizeFloatSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFloatSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed32()) return size } // appendFloatSliceValue encodes a []float32 value as a repeated Float. -func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -3896,17 +3896,17 @@ func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeFloatSliceValue wire decodes a []float32 value as a repeated Float. -func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) b = b[n:] @@ -3919,7 +3919,7 @@ func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) out.n = n @@ -3934,7 +3934,7 @@ var coderFloatSliceValue = valueCoderFuncs{ } // sizeFloatPackedSliceValue returns the size of wire encoding a []float32 value as a packed repeated Float. -func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3945,7 +3945,7 @@ func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalO } // appendFloatPackedSliceValue encodes a []float32 value as a packed repeated Float. 
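Editor's note (not part of the vendored diff): a Float field is simply the IEEE-754 bits of a float32 carried as a little-endian fixed32, which is why appendFloat and consumeFloat above go through math.Float32bits and math.Float32frombits. A minimal standalone sketch of that round trip, using the public protowire API:

package main

import (
	"fmt"
	"math"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	v := float32(1.5)

	// Encode: reinterpret the float32 as its bit pattern and emit a fixed32.
	var b []byte
	b = protowire.AppendFixed32(b, math.Float32bits(v))

	// Decode: read the fixed32 back and reinterpret the bits as a float32.
	bits, n := protowire.ConsumeFixed32(b)
	if n < 0 {
		panic(protowire.ParseError(n)) // the vendored coders now return errDecode here
	}
	fmt.Println(math.Float32frombits(bits)) // prints 1.5
}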
-func appendFloatPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFloatPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3969,13 +3969,13 @@ var coderFloatPackedSliceValue = valueCoderFuncs{ } // sizeSfixed64 returns the size of wire encoding a int64 pointer as a Sfixed64. -func sizeSfixed64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendSfixed64 wire encodes a int64 pointer as a Sfixed64. -func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, uint64(v)) @@ -3983,13 +3983,13 @@ func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([ } // consumeSfixed64 wire decodes a int64 pointer as a Sfixed64. -func consumeSfixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int64() = int64(v) out.n = n @@ -4005,7 +4005,7 @@ var coderSfixed64 = pointerCoderFuncs{ // sizeSfixed64NoZero returns the size of wire encoding a int64 pointer as a Sfixed64. // The zero value is not encoded. -func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int64() if v == 0 { return 0 @@ -4015,7 +4015,7 @@ func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size in // appendSfixed64NoZero wire encodes a int64 pointer as a Sfixed64. // The zero value is not encoded. -func appendSfixed64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() if v == 0 { return b, nil @@ -4034,13 +4034,13 @@ var coderSfixed64NoZero = pointerCoderFuncs{ // sizeSfixed64Ptr returns the size of wire encoding a *int64 pointer as a Sfixed64. // It panics if the pointer is nil. -func sizeSfixed64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendSfixed64Ptr wire encodes a *int64 pointer as a Sfixed64. // It panics if the pointer is nil. -func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, uint64(v)) @@ -4048,13 +4048,13 @@ func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeSfixed64Ptr wire decodes a *int64 pointer as a Sfixed64. 
-func consumeSfixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int64Ptr() if *vp == nil { @@ -4073,14 +4073,14 @@ var coderSfixed64Ptr = pointerCoderFuncs{ } // sizeSfixed64Slice returns the size of wire encoding a []int64 pointer as a repeated Sfixed64. -func sizeSfixed64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() size = len(s) * (f.tagsize + protowire.SizeFixed64()) return size } // appendSfixed64Slice encodes a []int64 pointer as a repeated Sfixed64. -func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -4090,18 +4090,18 @@ func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOption } // consumeSfixed64Slice wire decodes a []int64 pointer as a repeated Sfixed64. -func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, int64(v)) b = b[n:] @@ -4115,7 +4115,7 @@ func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFiel } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, int64(v)) out.n = n @@ -4130,7 +4130,7 @@ var coderSfixed64Slice = pointerCoderFuncs{ } // sizeSfixed64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sfixed64. -func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() if len(s) == 0 { return 0 @@ -4140,7 +4140,7 @@ func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (si } // appendSfixed64PackedSlice encodes a []int64 pointer as a packed repeated Sfixed64. -func appendSfixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() if len(s) == 0 { return b, nil @@ -4162,25 +4162,25 @@ var coderSfixed64PackedSlice = pointerCoderFuncs{ } // sizeSfixed64Value returns the size of wire encoding a int64 value as a Sfixed64. 
-func sizeSfixed64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeSfixed64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed64() } // appendSfixed64Value encodes a int64 value as a Sfixed64. -func appendSfixed64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed64(b, uint64(v.Int())) return b, nil } // consumeSfixed64Value decodes a int64 value as a Sfixed64. -func consumeSfixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSfixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt64(int64(v)), out, nil @@ -4194,14 +4194,14 @@ var coderSfixed64Value = valueCoderFuncs{ } // sizeSfixed64SliceValue returns the size of wire encoding a []int64 value as a repeated Sfixed64. -func sizeSfixed64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSfixed64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed64()) return size } // appendSfixed64SliceValue encodes a []int64 value as a repeated Sfixed64. -func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -4212,17 +4212,17 @@ func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64 } // consumeSfixed64SliceValue wire decodes a []int64 value as a repeated Sfixed64. 
-func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) b = b[n:] @@ -4235,7 +4235,7 @@ func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.N } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) out.n = n @@ -4250,7 +4250,7 @@ var coderSfixed64SliceValue = valueCoderFuncs{ } // sizeSfixed64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sfixed64. -func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4261,7 +4261,7 @@ func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marsh } // appendSfixed64PackedSliceValue encodes a []int64 value as a packed repeated Sfixed64. -func appendSfixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4285,13 +4285,13 @@ var coderSfixed64PackedSliceValue = valueCoderFuncs{ } // sizeFixed64 returns the size of wire encoding a uint64 pointer as a Fixed64. -func sizeFixed64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendFixed64 wire encodes a uint64 pointer as a Fixed64. -func appendFixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, v) @@ -4299,13 +4299,13 @@ func appendFixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([] } // consumeFixed64 wire decodes a uint64 pointer as a Fixed64. 
-func consumeFixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Uint64() = v out.n = n @@ -4321,7 +4321,7 @@ var coderFixed64 = pointerCoderFuncs{ // sizeFixed64NoZero returns the size of wire encoding a uint64 pointer as a Fixed64. // The zero value is not encoded. -func sizeFixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint64() if v == 0 { return 0 @@ -4331,7 +4331,7 @@ func sizeFixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int // appendFixed64NoZero wire encodes a uint64 pointer as a Fixed64. // The zero value is not encoded. -func appendFixed64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint64() if v == 0 { return b, nil @@ -4350,13 +4350,13 @@ var coderFixed64NoZero = pointerCoderFuncs{ // sizeFixed64Ptr returns the size of wire encoding a *uint64 pointer as a Fixed64. // It panics if the pointer is nil. -func sizeFixed64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendFixed64Ptr wire encodes a *uint64 pointer as a Fixed64. // It panics if the pointer is nil. -func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Uint64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, v) @@ -4364,13 +4364,13 @@ func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeFixed64Ptr wire decodes a *uint64 pointer as a Fixed64. -func consumeFixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Uint64Ptr() if *vp == nil { @@ -4389,14 +4389,14 @@ var coderFixed64Ptr = pointerCoderFuncs{ } // sizeFixed64Slice returns the size of wire encoding a []uint64 pointer as a repeated Fixed64. -func sizeFixed64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint64Slice() size = len(s) * (f.tagsize + protowire.SizeFixed64()) return size } // appendFixed64Slice encodes a []uint64 pointer as a repeated Fixed64. 
-func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -4406,18 +4406,18 @@ func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions } // consumeFixed64Slice wire decodes a []uint64 pointer as a repeated Fixed64. -func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, v) b = b[n:] @@ -4431,7 +4431,7 @@ func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderField } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, v) out.n = n @@ -4446,7 +4446,7 @@ var coderFixed64Slice = pointerCoderFuncs{ } // sizeFixed64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Fixed64. -func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint64Slice() if len(s) == 0 { return 0 @@ -4456,7 +4456,7 @@ func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (siz } // appendFixed64PackedSlice encodes a []uint64 pointer as a packed repeated Fixed64. -func appendFixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint64Slice() if len(s) == 0 { return b, nil @@ -4478,25 +4478,25 @@ var coderFixed64PackedSlice = pointerCoderFuncs{ } // sizeFixed64Value returns the size of wire encoding a uint64 value as a Fixed64. -func sizeFixed64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeFixed64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed64() } // appendFixed64Value encodes a uint64 value as a Fixed64. -func appendFixed64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed64(b, v.Uint()) return b, nil } // consumeFixed64Value decodes a uint64 value as a Fixed64. 
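The fixed64 coder hunks above switch every error return from protowire.ParseError(n) to a shared errDecode sentinel (declared in decode.go later in this patch); the decode pattern itself is unchanged. A minimal standalone sketch of that pattern, not taken from this patch — the field number 1 is an assumed example value:

package main

import (
	"errors"
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// errBadWire plays the role of the package-private errDecode sentinel.
var errBadWire = errors.New("cannot parse invalid wire-format data")

func main() {
	// Encode field 1 as a Fixed64: a varint tag followed by 8 little-endian bytes.
	b := protowire.AppendTag(nil, 1, protowire.Fixed64Type)
	b = protowire.AppendFixed64(b, 42)

	// Decode it back; a negative length from any Consume function means the
	// buffer is malformed, which the coders now report as one generic error.
	_, _, n := protowire.ConsumeTag(b)
	if n < 0 {
		panic(errBadWire)
	}
	v, m := protowire.ConsumeFixed64(b[n:])
	if m < 0 {
		panic(errBadWire)
	}
	fmt.Println(v) // 42
}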
-func consumeFixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfUint64(v), out, nil @@ -4510,14 +4510,14 @@ var coderFixed64Value = valueCoderFuncs{ } // sizeFixed64SliceValue returns the size of wire encoding a []uint64 value as a repeated Fixed64. -func sizeFixed64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFixed64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed64()) return size } // appendFixed64SliceValue encodes a []uint64 value as a repeated Fixed64. -func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -4528,17 +4528,17 @@ func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeFixed64SliceValue wire decodes a []uint64 value as a repeated Fixed64. -func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint64(v)) b = b[n:] @@ -4551,7 +4551,7 @@ func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Nu } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint64(v)) out.n = n @@ -4566,7 +4566,7 @@ var coderFixed64SliceValue = valueCoderFuncs{ } // sizeFixed64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Fixed64. -func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4577,7 +4577,7 @@ func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marsha } // appendFixed64PackedSliceValue encodes a []uint64 value as a packed repeated Fixed64. 
-func appendFixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4601,13 +4601,13 @@ var coderFixed64PackedSliceValue = valueCoderFuncs{ } // sizeDouble returns the size of wire encoding a float64 pointer as a Double. -func sizeDouble(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeDouble(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendDouble wire encodes a float64 pointer as a Double. -func appendDouble(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendDouble(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Float64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, math.Float64bits(v)) @@ -4615,13 +4615,13 @@ func appendDouble(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeDouble wire decodes a float64 pointer as a Double. -func consumeDouble(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeDouble(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Float64() = math.Float64frombits(v) out.n = n @@ -4637,7 +4637,7 @@ var coderDouble = pointerCoderFuncs{ // sizeDoubleNoZero returns the size of wire encoding a float64 pointer as a Double. // The zero value is not encoded. -func sizeDoubleNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeDoubleNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Float64() if v == 0 && !math.Signbit(float64(v)) { return 0 @@ -4647,7 +4647,7 @@ func sizeDoubleNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendDoubleNoZero wire encodes a float64 pointer as a Double. // The zero value is not encoded. -func appendDoubleNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendDoubleNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Float64() if v == 0 && !math.Signbit(float64(v)) { return b, nil @@ -4666,13 +4666,13 @@ var coderDoubleNoZero = pointerCoderFuncs{ // sizeDoublePtr returns the size of wire encoding a *float64 pointer as a Double. // It panics if the pointer is nil. -func sizeDoublePtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeDoublePtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendDoublePtr wire encodes a *float64 pointer as a Double. // It panics if the pointer is nil. 
-func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Float64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, math.Float64bits(v)) @@ -4680,13 +4680,13 @@ func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeDoublePtr wire decodes a *float64 pointer as a Double. -func consumeDoublePtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeDoublePtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Float64Ptr() if *vp == nil { @@ -4705,14 +4705,14 @@ var coderDoublePtr = pointerCoderFuncs{ } // sizeDoubleSlice returns the size of wire encoding a []float64 pointer as a repeated Double. -func sizeDoubleSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeDoubleSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Float64Slice() size = len(s) * (f.tagsize + protowire.SizeFixed64()) return size } // appendDoubleSlice encodes a []float64 pointer as a repeated Double. -func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Float64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -4722,18 +4722,18 @@ func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeDoubleSlice wire decodes a []float64 pointer as a repeated Double. -func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, math.Float64frombits(v)) b = b[n:] @@ -4747,7 +4747,7 @@ func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, math.Float64frombits(v)) out.n = n @@ -4762,7 +4762,7 @@ var coderDoubleSlice = pointerCoderFuncs{ } // sizeDoublePackedSlice returns the size of wire encoding a []float64 pointer as a packed repeated Double. -func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Float64Slice() if len(s) == 0 { return 0 @@ -4772,7 +4772,7 @@ func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendDoublePackedSlice encodes a []float64 pointer as a packed repeated Double. 
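The packed-slice coders above size a packed field as one length-delimited record holding all elements back to back (llen * SizeFixed64 payload bytes plus one tag). A standalone sketch of that layout for doubles, not taken from this patch — the field number 1 is an assumed example value:

package main

import (
	"fmt"
	"math"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	vals := []float64{1.5, -2.25}

	// Packed encoding: a single BytesType field whose payload is every
	// element as a fixed64, so the payload size is simply 8*len(vals).
	b := protowire.AppendTag(nil, 1, protowire.BytesType)
	b = protowire.AppendVarint(b, uint64(8*len(vals)))
	for _, v := range vals {
		b = protowire.AppendFixed64(b, math.Float64bits(v))
	}
	fmt.Println(len(b)) // 1 tag byte + 1 length byte + 16 payload bytes = 18
}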
-func appendDoublePackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendDoublePackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Float64Slice() if len(s) == 0 { return b, nil @@ -4794,25 +4794,25 @@ var coderDoublePackedSlice = pointerCoderFuncs{ } // sizeDoubleValue returns the size of wire encoding a float64 value as a Double. -func sizeDoubleValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeDoubleValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed64() } // appendDoubleValue encodes a float64 value as a Double. -func appendDoubleValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendDoubleValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) return b, nil } // consumeDoubleValue decodes a float64 value as a Double. -func consumeDoubleValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeDoubleValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfFloat64(math.Float64frombits(v)), out, nil @@ -4826,14 +4826,14 @@ var coderDoubleValue = valueCoderFuncs{ } // sizeDoubleSliceValue returns the size of wire encoding a []float64 value as a repeated Double. -func sizeDoubleSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeDoubleSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed64()) return size } // appendDoubleSliceValue encodes a []float64 value as a repeated Double. -func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -4844,17 +4844,17 @@ func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeDoubleSliceValue wire decodes a []float64 value as a repeated Double. 
-func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) b = b[n:] @@ -4867,7 +4867,7 @@ func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Num } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) out.n = n @@ -4882,7 +4882,7 @@ var coderDoubleSliceValue = valueCoderFuncs{ } // sizeDoublePackedSliceValue returns the size of wire encoding a []float64 value as a packed repeated Double. -func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4893,7 +4893,7 @@ func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal } // appendDoublePackedSliceValue encodes a []float64 value as a packed repeated Double. -func appendDoublePackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendDoublePackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4917,13 +4917,13 @@ var coderDoublePackedSliceValue = valueCoderFuncs{ } // sizeString returns the size of wire encoding a string pointer as a String. -func sizeString(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeString(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.String() return f.tagsize + protowire.SizeBytes(len(v)) } // appendString wire encodes a string pointer as a String. -func appendString(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendString(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.String() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendString(b, v) @@ -4931,15 +4931,15 @@ func appendString(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeString wire decodes a string pointer as a String. 
-func consumeString(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeString(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } - *p.String() = v + *p.String() = string(v) out.n = n return out, nil } @@ -4952,7 +4952,7 @@ var coderString = pointerCoderFuncs{ } // appendStringValidateUTF8 wire encodes a string pointer as a String. -func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.String() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendString(b, v) @@ -4963,18 +4963,18 @@ func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalO } // consumeStringValidateUTF8 wire decodes a string pointer as a String. -func consumeStringValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeStringValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } - if !utf8.ValidString(v) { + if !utf8.Valid(v) { return out, errInvalidUTF8{} } - *p.String() = v + *p.String() = string(v) out.n = n return out, nil } @@ -4988,7 +4988,7 @@ var coderStringValidateUTF8 = pointerCoderFuncs{ // sizeStringNoZero returns the size of wire encoding a string pointer as a String. // The zero value is not encoded. -func sizeStringNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeStringNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.String() if len(v) == 0 { return 0 @@ -4998,7 +4998,7 @@ func sizeStringNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendStringNoZero wire encodes a string pointer as a String. // The zero value is not encoded. -func appendStringNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.String() if len(v) == 0 { return b, nil @@ -5017,7 +5017,7 @@ var coderStringNoZero = pointerCoderFuncs{ // appendStringNoZeroValidateUTF8 wire encodes a string pointer as a String. // The zero value is not encoded. -func appendStringNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.String() if len(v) == 0 { return b, nil @@ -5039,14 +5039,14 @@ var coderStringNoZeroValidateUTF8 = pointerCoderFuncs{ // sizeStringPtr returns the size of wire encoding a *string pointer as a String. // It panics if the pointer is nil. 
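The string coders above now read the payload with protowire.ConsumeBytes and convert to string once at the end, validating UTF-8 on the raw bytes with utf8.Valid instead of utf8.ValidString. A standalone sketch of that decode path, not taken from this patch — the field number 1 is an assumed example value:

package main

import (
	"fmt"
	"unicode/utf8"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	b := protowire.AppendTag(nil, 1, protowire.BytesType)
	b = protowire.AppendString(b, "héllo")

	_, _, n := protowire.ConsumeTag(b)
	if n < 0 {
		panic("malformed tag")
	}
	// Consume the payload as raw bytes, validate it, then convert to string.
	raw, m := protowire.ConsumeBytes(b[n:])
	if m < 0 {
		panic("malformed length-delimited field")
	}
	if !utf8.Valid(raw) {
		panic("string field contains invalid UTF-8")
	}
	fmt.Println(string(raw)) // héllo
}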
-func sizeStringPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeStringPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.StringPtr() return f.tagsize + protowire.SizeBytes(len(v)) } // appendStringPtr wire encodes a *string pointer as a String. // It panics if the pointer is nil. -func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.StringPtr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendString(b, v) @@ -5054,19 +5054,19 @@ func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeStringPtr wire decodes a *string pointer as a String. -func consumeStringPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeStringPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.StringPtr() if *vp == nil { *vp = new(string) } - **vp = v + **vp = string(v) out.n = n return out, nil } @@ -5080,7 +5080,7 @@ var coderStringPtr = pointerCoderFuncs{ // appendStringPtrValidateUTF8 wire encodes a *string pointer as a String. // It panics if the pointer is nil. -func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.StringPtr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendString(b, v) @@ -5091,22 +5091,22 @@ func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marsh } // consumeStringPtrValidateUTF8 wire decodes a *string pointer as a String. -func consumeStringPtrValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeStringPtrValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } - if !utf8.ValidString(v) { + if !utf8.Valid(v) { return out, errInvalidUTF8{} } vp := p.StringPtr() if *vp == nil { *vp = new(string) } - **vp = v + **vp = string(v) out.n = n return out, nil } @@ -5119,7 +5119,7 @@ var coderStringPtrValidateUTF8 = pointerCoderFuncs{ } // sizeStringSlice returns the size of wire encoding a []string pointer as a repeated String. -func sizeStringSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeStringSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.StringSlice() for _, v := range s { size += f.tagsize + protowire.SizeBytes(len(v)) @@ -5128,7 +5128,7 @@ func sizeStringSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) } // appendStringSlice encodes a []string pointer as a repeated String. 
-func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.StringSlice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -5138,16 +5138,16 @@ func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeStringSlice wire decodes a []string pointer as a repeated String. -func consumeStringSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeStringSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.StringSlice() if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } - *sp = append(*sp, v) + *sp = append(*sp, string(v)) out.n = n return out, nil } @@ -5160,7 +5160,7 @@ var coderStringSlice = pointerCoderFuncs{ } // appendStringSliceValidateUTF8 encodes a []string pointer as a repeated String. -func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.StringSlice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -5173,19 +5173,19 @@ func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ mar } // consumeStringSliceValidateUTF8 wire decodes a []string pointer as a repeated String. -func consumeStringSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { - sp := p.StringSlice() +func consumeStringSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } - if !utf8.ValidString(v) { + if !utf8.Valid(v) { return out, errInvalidUTF8{} } - *sp = append(*sp, v) + sp := p.StringSlice() + *sp = append(*sp, string(v)) out.n = n return out, nil } @@ -5198,25 +5198,25 @@ var coderStringSliceValidateUTF8 = pointerCoderFuncs{ } // sizeStringValue returns the size of wire encoding a string value as a String. -func sizeStringValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeStringValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeBytes(len(v.String())) } // appendStringValue encodes a string value as a String. -func appendStringValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendStringValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendString(b, v.String()) return b, nil } // consumeStringValue decodes a string value as a String. 
-func consumeStringValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeStringValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return protoreflect.Value{}, out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfString(string(v)), out, nil @@ -5230,7 +5230,7 @@ var coderStringValue = valueCoderFuncs{ } // appendStringValueValidateUTF8 encodes a string value as a String. -func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendString(b, v.String()) if !utf8.ValidString(v.String()) { @@ -5240,15 +5240,15 @@ func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint6 } // consumeStringValueValidateUTF8 decodes a string value as a String. -func consumeStringValueValidateUTF8(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeStringValueValidateUTF8(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return protoreflect.Value{}, out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } - if !utf8.ValidString(v) { + if !utf8.Valid(v) { return protoreflect.Value{}, out, errInvalidUTF8{} } out.n = n @@ -5263,7 +5263,7 @@ var coderStringValueValidateUTF8 = valueCoderFuncs{ } // sizeStringSliceValue returns the size of wire encoding a []string value as a repeated String. -func sizeStringSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeStringSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -5273,7 +5273,7 @@ func sizeStringSliceValue(listv protoreflect.Value, tagsize int, _ marshalOption } // appendStringSliceValue encodes a []string value as a repeated String. -func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -5284,14 +5284,14 @@ func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeStringSliceValue wire decodes a []string value as a repeated String. 
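consumeStringSliceValue above handles the repeated case: string fields are never packed, so every element carries its own tag and length prefix. A standalone sketch of that record-per-element layout, not taken from this patch — the field number 2 is an assumed example value:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Repeated strings are never packed: each element repeats the tag.
	var b []byte
	for _, s := range []string{"a", "bc"} {
		b = protowire.AppendTag(b, 2, protowire.BytesType)
		b = protowire.AppendString(b, s)
	}

	var got []string
	for len(b) > 0 {
		_, _, n := protowire.ConsumeTag(b)
		if n < 0 {
			panic("malformed tag")
		}
		v, m := protowire.ConsumeBytes(b[n:])
		if m < 0 {
			panic("malformed length-delimited field")
		}
		got = append(got, string(v))
		b = b[n+m:]
	}
	fmt.Println(got) // [a bc]
}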
-func consumeStringSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeStringSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp != protowire.BytesType { return protoreflect.Value{}, out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfString(string(v))) out.n = n @@ -5306,13 +5306,13 @@ var coderStringSliceValue = valueCoderFuncs{ } // sizeBytes returns the size of wire encoding a []byte pointer as a Bytes. -func sizeBytes(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBytes(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Bytes() return f.tagsize + protowire.SizeBytes(len(v)) } // appendBytes wire encodes a []byte pointer as a Bytes. -func appendBytes(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytes(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bytes() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendBytes(b, v) @@ -5320,13 +5320,13 @@ func appendBytes(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]by } // consumeBytes wire decodes a []byte pointer as a Bytes. -func consumeBytes(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBytes(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Bytes() = append(emptyBuf[:], v...) out.n = n @@ -5341,7 +5341,7 @@ var coderBytes = pointerCoderFuncs{ } // appendBytesValidateUTF8 wire encodes a []byte pointer as a Bytes. -func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bytes() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendBytes(b, v) @@ -5352,13 +5352,13 @@ func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOp } // consumeBytesValidateUTF8 wire decodes a []byte pointer as a Bytes. -func consumeBytesValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBytesValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if !utf8.Valid(v) { return out, errInvalidUTF8{} @@ -5377,7 +5377,7 @@ var coderBytesValidateUTF8 = pointerCoderFuncs{ // sizeBytesNoZero returns the size of wire encoding a []byte pointer as a Bytes. // The zero value is not encoded. 
-func sizeBytesNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBytesNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Bytes() if len(v) == 0 { return 0 @@ -5387,7 +5387,7 @@ func sizeBytesNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendBytesNoZero wire encodes a []byte pointer as a Bytes. // The zero value is not encoded. -func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bytes() if len(v) == 0 { return b, nil @@ -5399,13 +5399,13 @@ func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) // consumeBytesNoZero wire decodes a []byte pointer as a Bytes. // The zero value is not decoded. -func consumeBytesNoZero(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBytesNoZero(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Bytes() = append(([]byte)(nil), v...) out.n = n @@ -5421,7 +5421,7 @@ var coderBytesNoZero = pointerCoderFuncs{ // appendBytesNoZeroValidateUTF8 wire encodes a []byte pointer as a Bytes. // The zero value is not encoded. -func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bytes() if len(v) == 0 { return b, nil @@ -5435,13 +5435,13 @@ func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ mar } // consumeBytesNoZeroValidateUTF8 wire decodes a []byte pointer as a Bytes. -func consumeBytesNoZeroValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBytesNoZeroValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if !utf8.Valid(v) { return out, errInvalidUTF8{} @@ -5459,7 +5459,7 @@ var coderBytesNoZeroValidateUTF8 = pointerCoderFuncs{ } // sizeBytesSlice returns the size of wire encoding a [][]byte pointer as a repeated Bytes. -func sizeBytesSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBytesSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.BytesSlice() for _, v := range s { size += f.tagsize + protowire.SizeBytes(len(v)) @@ -5468,7 +5468,7 @@ func sizeBytesSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { } // appendBytesSlice encodes a [][]byte pointer as a repeated Bytes. 
-func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.BytesSlice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -5478,14 +5478,14 @@ func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeBytesSlice wire decodes a [][]byte pointer as a repeated Bytes. -func consumeBytesSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBytesSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.BytesSlice() if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, append(emptyBuf[:], v...)) out.n = n @@ -5500,7 +5500,7 @@ var coderBytesSlice = pointerCoderFuncs{ } // appendBytesSliceValidateUTF8 encodes a [][]byte pointer as a repeated Bytes. -func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.BytesSlice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -5513,18 +5513,18 @@ func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ mars } // consumeBytesSliceValidateUTF8 wire decodes a [][]byte pointer as a repeated Bytes. -func consumeBytesSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { - sp := p.BytesSlice() +func consumeBytesSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if !utf8.Valid(v) { return out, errInvalidUTF8{} } + sp := p.BytesSlice() *sp = append(*sp, append(emptyBuf[:], v...)) out.n = n return out, nil @@ -5538,25 +5538,25 @@ var coderBytesSliceValidateUTF8 = pointerCoderFuncs{ } // sizeBytesValue returns the size of wire encoding a []byte value as a Bytes. -func sizeBytesValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeBytesValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeBytes(len(v.Bytes())) } // appendBytesValue encodes a []byte value as a Bytes. -func appendBytesValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendBytesValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendBytes(b, v.Bytes()) return b, nil } // consumeBytesValue decodes a []byte value as a Bytes. 
-func consumeBytesValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeBytesValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), out, nil @@ -5570,7 +5570,7 @@ var coderBytesValue = valueCoderFuncs{ } // sizeBytesSliceValue returns the size of wire encoding a [][]byte value as a repeated Bytes. -func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -5580,7 +5580,7 @@ func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions } // appendBytesSliceValue encodes a [][]byte value as a repeated Bytes. -func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -5591,14 +5591,14 @@ func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeBytesSliceValue wire decodes a [][]byte value as a repeated Bytes. 
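The bytes coders above store decoded values as append(emptyBuf[:], v...) rather than keeping the slice returned by ConsumeBytes, because that slice aliases the input buffer. A standalone sketch of why the copy matters, not taken from this patch — the field number 1 is an assumed example value:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	b := protowire.AppendTag(nil, 1, protowire.BytesType)
	b = protowire.AppendBytes(b, []byte("abc"))

	_, _, n := protowire.ConsumeTag(b)
	v, _ := protowire.ConsumeBytes(b[n:])

	// v points into b; copy it before keeping it so later changes to the
	// input buffer cannot reach the decoded field value.
	stored := append([]byte(nil), v...)
	b[len(b)-1] = 'X'
	fmt.Println(string(v), string(stored)) // abX abc
}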
-func consumeBytesSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeBytesSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp != protowire.BytesType { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...))) out.n = n diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index 44885a761f6..c1245fef487 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -5,7 +5,6 @@ package impl import ( - "errors" "reflect" "sort" @@ -118,7 +117,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo } b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } var ( key = mapi.keyZero @@ -127,10 +126,10 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo for len(b) > 0 { num, wtyp, n := protowire.ConsumeTag(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if num > protowire.MaxValidNumber { - return out, errors.New("invalid field number") + return out, errDecode } b = b[n:] err := errUnknown @@ -157,7 +156,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo if err == errUnknown { n = protowire.ConsumeFieldValue(num, wtyp, b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } } else if err != nil { return out, err @@ -175,7 +174,7 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi } b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } var ( key = mapi.keyZero @@ -184,10 +183,10 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi for len(b) > 0 { num, wtyp, n := protowire.ConsumeTag(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if num > protowire.MaxValidNumber { - return out, errors.New("invalid field number") + return out, errDecode } b = b[n:] err := errUnknown @@ -208,7 +207,7 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi var v []byte v, n = protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } var o unmarshalOutput o, err = f.mi.unmarshalPointer(v, pointerOfValue(val), 0, opts) @@ -221,7 +220,7 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi if err == errUnknown { n = protowire.ConsumeFieldValue(num, wtyp, b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } } else if err != nil { return out, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 0e176d565d4..cd40527ff64 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -11,7 +11,7 @@ import ( 
"google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" - "google.golang.org/protobuf/internal/fieldsort" + "google.golang.org/protobuf/internal/order" pref "google.golang.org/protobuf/reflect/protoreflect" piface "google.golang.org/protobuf/runtime/protoiface" ) @@ -27,6 +27,7 @@ type coderMessageInfo struct { coderFields map[protowire.Number]*coderFieldInfo sizecacheOffset offset unknownOffset offset + unknownPtrKind bool extensionOffset offset needsInitCheck bool isMessageSet bool @@ -47,9 +48,20 @@ type coderFieldInfo struct { } func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { - mi.sizecacheOffset = si.sizecacheOffset - mi.unknownOffset = si.unknownOffset - mi.extensionOffset = si.extensionOffset + mi.sizecacheOffset = invalidOffset + mi.unknownOffset = invalidOffset + mi.extensionOffset = invalidOffset + + if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType { + mi.sizecacheOffset = si.sizecacheOffset + } + if si.unknownOffset.IsValid() && (si.unknownType == unknownFieldsAType || si.unknownType == unknownFieldsBType) { + mi.unknownOffset = si.unknownOffset + mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr + } + if si.extensionOffset.IsValid() && si.extensionType == extensionFieldsType { + mi.extensionOffset = si.extensionOffset + } mi.coderFields = make(map[protowire.Number]*coderFieldInfo) fields := mi.Desc.Fields() @@ -73,6 +85,27 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { var funcs pointerCoderFuncs var childMessage *MessageInfo switch { + case ft == nil: + // This never occurs for generated message types. + // It implies that a hand-crafted type has missing Go fields + // for specific protobuf message fields. + funcs = pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return 0 + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return nil, nil + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + panic("missing Go struct field for " + string(fd.FullName())) + }, + merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + } case isOneof: fieldOffset = offsetOf(fs, mi.Exporter) case fd.IsWeak(): @@ -136,7 +169,7 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { sort.Slice(mi.orderedCoderFields, func(i, j int) bool { fi := fields.ByNumber(mi.orderedCoderFields[i].num) fj := fields.ByNumber(mi.orderedCoderFields[j].num) - return fieldsort.Less(fi, fj) + return order.LegacyFieldOrder(fi, fj) }) } @@ -157,3 +190,28 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { mi.methods.Merge = mi.merge } } + +// getUnknownBytes returns a *[]byte for the unknown fields. +// It is the caller's responsibility to check whether the pointer is nil. +// This function is specially designed to be inlineable. +func (mi *MessageInfo) getUnknownBytes(p pointer) *[]byte { + if mi.unknownPtrKind { + return *p.Apply(mi.unknownOffset).BytesPtr() + } else { + return p.Apply(mi.unknownOffset).Bytes() + } +} + +// mutableUnknownBytes returns a *[]byte for the unknown fields. +// The returned pointer is guaranteed to not be nil. 
+func (mi *MessageInfo) mutableUnknownBytes(p pointer) *[]byte { + if mi.unknownPtrKind { + bp := p.Apply(mi.unknownOffset).BytesPtr() + if *bp == nil { + *bp = new([]byte) + } + return *bp + } else { + return p.Apply(mi.unknownOffset).Bytes() + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go index cfb68e12fbf..b7a23faf1e4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go @@ -29,8 +29,9 @@ func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int) size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts) } - unknown := *p.Apply(mi.unknownOffset).Bytes() - size += messageset.SizeUnknown(unknown) + if u := mi.getUnknownBytes(p); u != nil { + size += messageset.SizeUnknown(*u) + } return size } @@ -69,10 +70,12 @@ func marshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts marshalOptions } } - unknown := *p.Apply(mi.unknownOffset).Bytes() - b, err := messageset.AppendUnknown(b, unknown) - if err != nil { - return b, err + if u := mi.getUnknownBytes(p); u != nil { + var err error + b, err = messageset.AppendUnknown(b, *u) + if err != nil { + return b, err + } } return b, nil @@ -100,13 +103,13 @@ func unmarshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts unmarshalOpt *ep = make(map[int32]ExtensionField) } ext := *ep - unknown := p.Apply(mi.unknownOffset).Bytes() initialized := true err = messageset.Unmarshal(b, true, func(num protowire.Number, v []byte) error { o, err := mi.unmarshalExtension(v, num, protowire.BytesType, ext, opts) if err == errUnknown { - *unknown = protowire.AppendTag(*unknown, num, protowire.BytesType) - *unknown = append(*unknown, v...) + u := mi.mutableUnknownBytes(p) + *u = protowire.AppendTag(*u, num, protowire.BytesType) + *u = append(*u, v...) 
return nil } if !o.initialized { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go index 86f7dc3c9d7..90705e3aea7 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go @@ -30,7 +30,7 @@ func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ } v, n := protowire.ConsumeVarint(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } p.v.Elem().SetInt(int64(v)) out.n = n @@ -130,12 +130,12 @@ func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeVarint(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } rv := reflect.New(s.Type().Elem()).Elem() rv.SetInt(int64(v)) @@ -150,7 +150,7 @@ func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf } v, n := protowire.ConsumeVarint(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } rv := reflect.New(s.Type().Elem()).Elem() rv.SetInt(int64(v)) diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index 36a90dff381..acd61bb50b2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -423,6 +423,13 @@ func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } + if c.isNonPointer() { + if v.CanAddr() { + v = v.Addr() // T => *T + } else { + v = reflect.Zero(reflect.PtrTo(v.Type())) + } + } if m, ok := v.Interface().(pref.ProtoMessage); ok { return pref.ValueOfMessage(m.ProtoReflect()) } @@ -437,6 +444,16 @@ func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value { } else { rv = reflect.ValueOf(m.Interface()) } + if c.isNonPointer() { + if rv.Type() != reflect.PtrTo(c.goType) { + panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), reflect.PtrTo(c.goType))) + } + if !rv.IsNil() { + rv = rv.Elem() // *T => T + } else { + rv = reflect.Zero(rv.Type().Elem()) + } + } if rv.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), c.goType)) } @@ -451,6 +468,9 @@ func (c *messageConverter) IsValidPB(v pref.Value) bool { } else { rv = reflect.ValueOf(m.Interface()) } + if c.isNonPointer() { + return rv.Type() == reflect.PtrTo(c.goType) + } return rv.Type() == c.goType } @@ -459,9 +479,18 @@ func (c *messageConverter) IsValidGo(v reflect.Value) bool { } func (c *messageConverter) New() pref.Value { + if c.isNonPointer() { + return c.PBValueOf(reflect.New(c.goType).Elem()) + } return c.PBValueOf(reflect.New(c.goType.Elem())) } func (c *messageConverter) Zero() pref.Value { return c.PBValueOf(reflect.Zero(c.goType)) } + +// isNonPointer reports whether the type is a non-pointer type. +// This never occurs for generated message types. 
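The convert.go hunks above teach messageConverter to handle message Go types that are plain structs rather than pointers: an addressable T is converted to *T with Addr, and a non-addressable one falls back to a typed nil pointer. A standalone reflect sketch of that step — the type M and the function name are invented for the example:

package main

import (
	"fmt"
	"reflect"
)

type M struct{ X int32 }

// toPointer mirrors the T => *T conversion: take the address when possible,
// otherwise produce a typed nil *T.
func toPointer(v reflect.Value) reflect.Value {
	if v.CanAddr() {
		return v.Addr()
	}
	return reflect.Zero(reflect.PtrTo(v.Type()))
}

func main() {
	var m M
	addressable := reflect.ValueOf(&m).Elem() // addressable M
	literal := reflect.ValueOf(M{X: 1})       // non-addressable M
	fmt.Println(toPointer(addressable).Type()) // *main.M
	fmt.Println(toPointer(literal).IsNil())    // true
}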
+func (c *messageConverter) isNonPointer() bool { + return c.goType.Kind() != reflect.Ptr +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go index 85ba1d3b334..949dc49a65b 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/decode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go @@ -17,6 +17,8 @@ import ( piface "google.golang.org/protobuf/runtime/protoiface" ) +var errDecode = errors.New("cannot parse invalid wire-format data") + type unmarshalOptions struct { flags protoiface.UnmarshalInputFlags resolver interface { @@ -100,13 +102,13 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. var n int tag, n = protowire.ConsumeVarint(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } b = b[n:] } var num protowire.Number if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { - return out, errors.New("invalid field number") + return out, errDecode } else { num = protowire.Number(n) } @@ -114,7 +116,7 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. if wtyp == protowire.EndGroupType { if num != groupTag { - return out, errors.New("mismatching end group marker") + return out, errDecode } groupTag = 0 break @@ -170,10 +172,10 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. } n = protowire.ConsumeFieldValue(num, wtyp, b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if !opts.DiscardUnknown() && mi.unknownOffset.IsValid() { - u := p.Apply(mi.unknownOffset).Bytes() + u := mi.mutableUnknownBytes(p) *u = protowire.AppendTag(*u, num, wtyp) *u = append(*u, b[:n]...) } @@ -181,7 +183,7 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. b = b[n:] } if groupTag != 0 { - return out, errors.New("missing end group marker") + return out, errDecode } if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) { initialized = false @@ -221,7 +223,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p return out, nil } case ValidationInvalid: - return out, errors.New("invalid wire format") + return out, errDecode case ValidationUnknown: } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index 8c8a794c631..845c67d6e7e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -79,8 +79,9 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int size += f.funcs.size(fptr, f, opts) } if mi.unknownOffset.IsValid() { - u := *p.Apply(mi.unknownOffset).Bytes() - size += len(u) + if u := mi.getUnknownBytes(p); u != nil { + size += len(*u) + } } if mi.sizecacheOffset.IsValid() { if size > math.MaxInt32 { @@ -141,8 +142,9 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt } } if mi.unknownOffset.IsValid() && !mi.isMessageSet { - u := *p.Apply(mi.unknownOffset).Bytes() - b = append(b, u...) + if u := mi.getUnknownBytes(p); u != nil { + b = append(b, (*u)...) 
+ } } return b, nil } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go index c3d741c2f0c..e3fb0b57858 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go @@ -30,7 +30,7 @@ func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.M if mv := (Export{}).protoMessageV2Of(m); mv != nil { return mv.ProtoReflect().Type() } - return legacyLoadMessageInfo(reflect.TypeOf(m), name) + return legacyLoadMessageType(reflect.TypeOf(m), name) } // UnmarshalJSONEnum unmarshals an enum from a JSON-encoded input. diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 61757ce50a7..49e723161c0 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -154,7 +154,8 @@ func (x placeholderExtension) Number() pref.FieldNumber { retu func (x placeholderExtension) Cardinality() pref.Cardinality { return 0 } func (x placeholderExtension) Kind() pref.Kind { return 0 } func (x placeholderExtension) HasJSONName() bool { return false } -func (x placeholderExtension) JSONName() string { return "" } +func (x placeholderExtension) JSONName() string { return "[" + string(x.name) + "]" } +func (x placeholderExtension) TextName() string { return "[" + string(x.name) + "]" } func (x placeholderExtension) HasPresence() bool { return false } func (x placeholderExtension) HasOptionalKeyword() bool { return false } func (x placeholderExtension) IsExtension() bool { return true } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 06c68e11702..3759b010c0c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -24,14 +24,24 @@ import ( // legacyWrapMessage wraps v as a protoreflect.Message, // where v must be a *struct kind and not implement the v2 API already. func legacyWrapMessage(v reflect.Value) pref.Message { - typ := v.Type() - if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct { + t := v.Type() + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { return aberrantMessage{v: v} } - mt := legacyLoadMessageInfo(typ, "") + mt := legacyLoadMessageInfo(t, "") return mt.MessageOf(v.Interface()) } +// legacyLoadMessageType dynamically loads a protoreflect.Type for t, +// where t must be not implement the v2 API already. +// The provided name is used if it cannot be determined from the message. 
+func legacyLoadMessageType(t reflect.Type, name pref.FullName) protoreflect.MessageType { + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { + return aberrantMessageType{t} + } + return legacyLoadMessageInfo(t, name) +} + var legacyMessageTypeCache sync.Map // map[reflect.Type]*MessageInfo // legacyLoadMessageInfo dynamically loads a *MessageInfo for t, @@ -49,8 +59,9 @@ func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { GoReflectType: t, } + var hasMarshal, hasUnmarshal bool v := reflect.Zero(t).Interface() - if _, ok := v.(legacyMarshaler); ok { + if _, hasMarshal = v.(legacyMarshaler); hasMarshal { mi.methods.Marshal = legacyMarshal // We have no way to tell whether the type's Marshal method @@ -59,10 +70,10 @@ func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { // calling Marshal methods when present. mi.methods.Flags |= piface.SupportMarshalDeterministic } - if _, ok := v.(legacyUnmarshaler); ok { + if _, hasUnmarshal = v.(legacyUnmarshaler); hasUnmarshal { mi.methods.Unmarshal = legacyUnmarshal } - if _, ok := v.(legacyMerger); ok { + if _, hasMerge := v.(legacyMerger); hasMerge || (hasMarshal && hasUnmarshal) { mi.methods.Merge = legacyMerge } @@ -75,7 +86,7 @@ func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { var legacyMessageDescCache sync.Map // map[reflect.Type]protoreflect.MessageDescriptor // LegacyLoadMessageDesc returns an MessageDescriptor derived from the Go type, -// which must be a *struct kind and not implement the v2 API already. +// which should be a *struct kind and must not implement the v2 API already. // // This is exported for testing purposes. func LegacyLoadMessageDesc(t reflect.Type) pref.MessageDescriptor { @@ -114,17 +125,19 @@ func legacyLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescr // If the Go type has no fields, then this might be a proto3 empty message // from before the size cache was added. If there are any fields, check to // see that at least one of them looks like something we generated. 
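The restructured block that follows implements the heuristic described in the comment above: a hand-written struct only "looks generated" if at least one field carries a protobuf or protobuf_oneof struct tag or an XXX_-prefixed name, and the check is now guarded by a struct-kind test. A self-contained sketch of that tag inspection; looksGenerated and the sample types are illustrative names, not code from this diff.

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// looksGenerated reports whether any field of the struct type t carries the
// markers that protoc-gen-go emits: a protobuf/protobuf_oneof struct tag or
// an XXX_-prefixed field name.
func looksGenerated(t reflect.Type) bool {
	if t.Kind() != reflect.Struct {
		return false
	}
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") {
			return true
		}
	}
	return false
}

type generatedLike struct {
	Name string `protobuf:"bytes,1,opt,name=name,proto3"`
}

type plainStruct struct {
	Name string
}

func main() {
	fmt.Println(looksGenerated(reflect.TypeOf(generatedLike{}))) // true
	fmt.Println(looksGenerated(reflect.TypeOf(plainStruct{})))   // false
}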
- if nfield := t.Elem().NumField(); nfield > 0 { - hasProtoField := false - for i := 0; i < nfield; i++ { - f := t.Elem().Field(i) - if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") { - hasProtoField = true - break + if t.Elem().Kind() == reflect.Struct { + if nfield := t.Elem().NumField(); nfield > 0 { + hasProtoField := false + for i := 0; i < nfield; i++ { + f := t.Elem().Field(i) + if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") { + hasProtoField = true + break + } + } + if !hasProtoField { + return aberrantLoadMessageDesc(t, name) } - } - if !hasProtoField { - return aberrantLoadMessageDesc(t, name) } } @@ -370,7 +383,7 @@ type legacyMerger interface { Merge(protoiface.MessageV1) } -var legacyProtoMethods = &piface.Methods{ +var aberrantProtoMethods = &piface.Methods{ Marshal: legacyMarshal, Unmarshal: legacyUnmarshal, Merge: legacyMerge, @@ -401,18 +414,40 @@ func legacyUnmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { v := in.Message.(unwrapper).protoUnwrap() unmarshaler, ok := v.(legacyUnmarshaler) if !ok { - return piface.UnmarshalOutput{}, errors.New("%T does not implement Marshal", v) + return piface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v) } return piface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf) } func legacyMerge(in piface.MergeInput) piface.MergeOutput { + // Check whether this supports the legacy merger. dstv := in.Destination.(unwrapper).protoUnwrap() merger, ok := dstv.(legacyMerger) + if ok { + merger.Merge(Export{}.ProtoMessageV1Of(in.Source)) + return piface.MergeOutput{Flags: piface.MergeComplete} + } + + // If legacy merger is unavailable, implement merge in terms of + // a marshal and unmarshal operation. + srcv := in.Source.(unwrapper).protoUnwrap() + marshaler, ok := srcv.(legacyMarshaler) if !ok { return piface.MergeOutput{} } - merger.Merge(Export{}.ProtoMessageV1Of(in.Source)) + dstv = in.Destination.(unwrapper).protoUnwrap() + unmarshaler, ok := dstv.(legacyUnmarshaler) + if !ok { + return piface.MergeOutput{} + } + b, err := marshaler.Marshal() + if err != nil { + return piface.MergeOutput{} + } + err = unmarshaler.Unmarshal(b) + if err != nil { + return piface.MergeOutput{} + } return piface.MergeOutput{Flags: piface.MergeComplete} } @@ -422,6 +457,9 @@ type aberrantMessageType struct { } func (mt aberrantMessageType) New() pref.Message { + if mt.t.Kind() == reflect.Ptr { + return aberrantMessage{reflect.New(mt.t.Elem())} + } return aberrantMessage{reflect.Zero(mt.t)} } func (mt aberrantMessageType) Zero() pref.Message { @@ -443,6 +481,17 @@ type aberrantMessage struct { v reflect.Value } +// Reset implements the v1 proto.Message.Reset method. 
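legacyMerge above gains a fallback: when the destination lacks a v1 Merge method but both sides expose Marshal/Unmarshal, merging is done by marshaling the source and unmarshaling the bytes into the destination, relying on the rule that unmarshaling wire data without a reset merges into the existing message. A rough equivalent through the public proto API is sketched here; mergeByRoundTrip and the structpb usage are illustrative, not the internal code path, and the aberrantMessage Reset method introduced by the comment above continues after this note.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

// mergeByRoundTrip approximates a merge by appending src's wire form onto dst,
// mirroring the fallback strategy: unmarshaling without a reset merges fields.
func mergeByRoundTrip(dst, src proto.Message) error {
	b, err := proto.Marshal(src)
	if err != nil {
		return err
	}
	// Merge keeps dst's existing contents instead of resetting first.
	return proto.UnmarshalOptions{Merge: true}.Unmarshal(b, dst)
}

func main() {
	dst, _ := structpb.NewStruct(map[string]interface{}{"a": 1})
	src, _ := structpb.NewStruct(map[string]interface{}{"b": 2})
	if err := mergeByRoundTrip(dst, src); err != nil {
		panic(err)
	}
	fmt.Println(len(dst.GetFields())) // 2: map entries from both messages
}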
+func (m aberrantMessage) Reset() { + if mr, ok := m.v.Interface().(interface{ Reset() }); ok { + mr.Reset() + return + } + if m.v.Kind() == reflect.Ptr && !m.v.IsNil() { + m.v.Elem().Set(reflect.Zero(m.v.Type().Elem())) + } +} + func (m aberrantMessage) ProtoReflect() pref.Message { return m } @@ -454,33 +503,40 @@ func (m aberrantMessage) Type() pref.MessageType { return aberrantMessageType{m.v.Type()} } func (m aberrantMessage) New() pref.Message { + if m.v.Type().Kind() == reflect.Ptr { + return aberrantMessage{reflect.New(m.v.Type().Elem())} + } return aberrantMessage{reflect.Zero(m.v.Type())} } func (m aberrantMessage) Interface() pref.ProtoMessage { return m } func (m aberrantMessage) Range(f func(pref.FieldDescriptor, pref.Value) bool) { + return } func (m aberrantMessage) Has(pref.FieldDescriptor) bool { - panic("invalid field descriptor") + return false } func (m aberrantMessage) Clear(pref.FieldDescriptor) { - panic("invalid field descriptor") + panic("invalid Message.Clear on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) Get(pref.FieldDescriptor) pref.Value { - panic("invalid field descriptor") +func (m aberrantMessage) Get(fd pref.FieldDescriptor) pref.Value { + if fd.Default().IsValid() { + return fd.Default() + } + panic("invalid Message.Get on " + string(m.Descriptor().FullName())) } func (m aberrantMessage) Set(pref.FieldDescriptor, pref.Value) { - panic("invalid field descriptor") + panic("invalid Message.Set on " + string(m.Descriptor().FullName())) } func (m aberrantMessage) Mutable(pref.FieldDescriptor) pref.Value { - panic("invalid field descriptor") + panic("invalid Message.Mutable on " + string(m.Descriptor().FullName())) } func (m aberrantMessage) NewField(pref.FieldDescriptor) pref.Value { - panic("invalid field descriptor") + panic("invalid Message.NewField on " + string(m.Descriptor().FullName())) } func (m aberrantMessage) WhichOneof(pref.OneofDescriptor) pref.FieldDescriptor { - panic("invalid oneof descriptor") + panic("invalid Message.WhichOneof descriptor on " + string(m.Descriptor().FullName())) } func (m aberrantMessage) GetUnknown() pref.RawFields { return nil @@ -489,13 +545,13 @@ func (m aberrantMessage) SetUnknown(pref.RawFields) { // SetUnknown discards its input on messages which don't support unknown field storage. } func (m aberrantMessage) IsValid() bool { - // An invalid message is a read-only, empty message. Since we don't know anything - // about the alleged contents of this message, we can't say with confidence that - // it is invalid in this sense. Therefore, report it as valid. - return true + if m.v.Kind() == reflect.Ptr { + return !m.v.IsNil() + } + return false } func (m aberrantMessage) ProtoMethods() *piface.Methods { - return legacyProtoMethods + return aberrantProtoMethods } func (m aberrantMessage) protoUnwrap() interface{} { return m.v.Interface() diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go index cdc4267dfad..c65bbc0446e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/merge.go +++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go @@ -77,9 +77,9 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { } } if mi.unknownOffset.IsValid() { - du := dst.Apply(mi.unknownOffset).Bytes() - su := src.Apply(mi.unknownOffset).Bytes() - if len(*su) > 0 { + su := mi.getUnknownBytes(src) + if su != nil && len(*su) > 0 { + du := mi.mutableUnknownBytes(dst) *du = append(*du, *su...) 
} } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index c026a98180d..a104e28e858 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -15,6 +15,7 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" ) // MessageInfo provides protobuf related functionality for a given Go type @@ -109,22 +110,29 @@ func (mi *MessageInfo) getPointer(m pref.Message) (p pointer, ok bool) { type ( SizeCache = int32 WeakFields = map[int32]protoreflect.ProtoMessage - UnknownFields = []byte + UnknownFields = unknownFieldsA // TODO: switch to unknownFieldsB + unknownFieldsA = []byte + unknownFieldsB = *[]byte ExtensionFields = map[int32]ExtensionField ) var ( sizecacheType = reflect.TypeOf(SizeCache(0)) weakFieldsType = reflect.TypeOf(WeakFields(nil)) - unknownFieldsType = reflect.TypeOf(UnknownFields(nil)) + unknownFieldsAType = reflect.TypeOf(unknownFieldsA(nil)) + unknownFieldsBType = reflect.TypeOf(unknownFieldsB(nil)) extensionFieldsType = reflect.TypeOf(ExtensionFields(nil)) ) type structInfo struct { sizecacheOffset offset + sizecacheType reflect.Type weakOffset offset + weakType reflect.Type unknownOffset offset + unknownType reflect.Type extensionOffset offset + extensionType reflect.Type fieldsByNumber map[pref.FieldNumber]reflect.StructField oneofsByName map[pref.Name]reflect.StructField @@ -151,18 +159,22 @@ fieldLoop: case genid.SizeCache_goname, genid.SizeCacheA_goname: if f.Type == sizecacheType { si.sizecacheOffset = offsetOf(f, mi.Exporter) + si.sizecacheType = f.Type } case genid.WeakFields_goname, genid.WeakFieldsA_goname: if f.Type == weakFieldsType { si.weakOffset = offsetOf(f, mi.Exporter) + si.weakType = f.Type } case genid.UnknownFields_goname, genid.UnknownFieldsA_goname: - if f.Type == unknownFieldsType { + if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType { si.unknownOffset = offsetOf(f, mi.Exporter) + si.unknownType = f.Type } case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname: if f.Type == extensionFieldsType { si.extensionOffset = offsetOf(f, mi.Exporter) + si.extensionType = f.Type } default: for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { @@ -212,4 +224,53 @@ func (mi *MessageInfo) New() protoreflect.Message { func (mi *MessageInfo) Zero() protoreflect.Message { return mi.MessageOf(reflect.Zero(mi.GoReflectType).Interface()) } -func (mi *MessageInfo) Descriptor() protoreflect.MessageDescriptor { return mi.Desc } +func (mi *MessageInfo) Descriptor() protoreflect.MessageDescriptor { + return mi.Desc +} +func (mi *MessageInfo) Enum(i int) protoreflect.EnumType { + mi.init() + fd := mi.Desc.Fields().Get(i) + return Export{}.EnumTypeOf(mi.fieldTypes[fd.Number()]) +} +func (mi *MessageInfo) Message(i int) protoreflect.MessageType { + mi.init() + fd := mi.Desc.Fields().Get(i) + switch { + case fd.IsWeak(): + mt, _ := preg.GlobalTypes.FindMessageByName(fd.Message().FullName()) + return mt + case fd.IsMap(): + return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]} + default: + return Export{}.MessageTypeOf(mi.fieldTypes[fd.Number()]) + } +} + +type mapEntryType struct { + desc protoreflect.MessageDescriptor + valType interface{} // zero value of enum or message type +} + +func (mt 
mapEntryType) New() protoreflect.Message { + return nil +} +func (mt mapEntryType) Zero() protoreflect.Message { + return nil +} +func (mt mapEntryType) Descriptor() protoreflect.MessageDescriptor { + return mt.desc +} +func (mt mapEntryType) Enum(i int) protoreflect.EnumType { + fd := mt.desc.Fields().Get(i) + if fd.Enum() == nil { + return nil + } + return Export{}.EnumTypeOf(mt.valType) +} +func (mt mapEntryType) Message(i int) protoreflect.MessageType { + fd := mt.desc.Fields().Get(i) + if fd.Message() == nil { + return nil + } + return Export{}.MessageTypeOf(mt.valType) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index 0f4b8db760a..9488b726131 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -8,6 +8,7 @@ import ( "fmt" "reflect" + "google.golang.org/protobuf/internal/detrand" "google.golang.org/protobuf/internal/pragma" pref "google.golang.org/protobuf/reflect/protoreflect" ) @@ -16,6 +17,11 @@ type reflectMessageInfo struct { fields map[pref.FieldNumber]*fieldInfo oneofs map[pref.Name]*oneofInfo + // fieldTypes contains the zero value of an enum or message field. + // For lists, it contains the element type. + // For maps, it contains the entry value type. + fieldTypes map[pref.FieldNumber]interface{} + // denseFields is a subset of fields where: // 0 < fieldDesc.Number() < len(denseFields) // It provides faster access to the fieldInfo, but may be incomplete. @@ -36,6 +42,7 @@ func (mi *MessageInfo) makeReflectFuncs(t reflect.Type, si structInfo) { mi.makeKnownFieldsFunc(si) mi.makeUnknownFieldsFunc(t, si) mi.makeExtensionFieldsFunc(t, si) + mi.makeFieldTypes(si) } // makeKnownFieldsFunc generates functions for operations that can be performed @@ -51,17 +58,23 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { for i := 0; i < fds.Len(); i++ { fd := fds.Get(i) fs := si.fieldsByNumber[fd.Number()] + isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() + if isOneof { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } var fi fieldInfo switch { - case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): - fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) + case fs.Type == nil: + fi = fieldInfoForMissing(fd) // never occurs for officially generated message types + case isOneof: + fi = fieldInfoForOneof(fd, fs, mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) case fd.IsMap(): fi = fieldInfoForMap(fd, fs, mi.Exporter) case fd.IsList(): fi = fieldInfoForList(fd, fs, mi.Exporter) case fd.IsWeak(): fi = fieldInfoForWeakMessage(fd, si.weakOffset) - case fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind: + case fd.Message() != nil: fi = fieldInfoForMessage(fd, fs, mi.Exporter) default: fi = fieldInfoForScalar(fd, fs, mi.Exporter) @@ -92,27 +105,53 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { i++ } } + + // Introduce instability to iteration order, but keep it deterministic. 
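The comment above refers to deliberately perturbing the field iteration order with the internal detrand package, so callers cannot grow a dependency on any fixed Range order while the perturbation stays reproducible within a given binary; the swap itself follows below. A loose illustration of the idea, seeded here from the executable name purely for demonstration: perturb and the seeding choice are assumptions of this sketch, not how detrand actually works.

package main

import (
	"fmt"
	"hash/fnv"
	"os"
)

// perturb deterministically swaps one adjacent pair of elements, so the order
// is stable for a given binary but not something callers should rely on.
func perturb(items []string) {
	if len(items) < 2 {
		return
	}
	h := fnv.New64a()
	h.Write([]byte(os.Args[0])) // stand-in seed; purely illustrative
	i := int(h.Sum64() % uint64(len(items)-1))
	items[i], items[i+1] = items[i+1], items[i]
}

func main() {
	fields := []string{"name", "id", "labels", "created_at"}
	perturb(fields)
	fmt.Println(fields) // identical output on every run of this binary
}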
+ if len(mi.rangeInfos) > 1 && detrand.Bool() { + i := detrand.Intn(len(mi.rangeInfos) - 1) + mi.rangeInfos[i], mi.rangeInfos[i+1] = mi.rangeInfos[i+1], mi.rangeInfos[i] + } } func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { - mi.getUnknown = func(pointer) pref.RawFields { return nil } - mi.setUnknown = func(pointer, pref.RawFields) { return } - if si.unknownOffset.IsValid() { + switch { + case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsAType: + // Handle as []byte. mi.getUnknown = func(p pointer) pref.RawFields { if p.IsNil() { return nil } - rv := p.Apply(si.unknownOffset).AsValueOf(unknownFieldsType) - return pref.RawFields(*rv.Interface().(*[]byte)) + return *p.Apply(mi.unknownOffset).Bytes() } mi.setUnknown = func(p pointer, b pref.RawFields) { if p.IsNil() { panic("invalid SetUnknown on nil Message") } - rv := p.Apply(si.unknownOffset).AsValueOf(unknownFieldsType) - *rv.Interface().(*[]byte) = []byte(b) + *p.Apply(mi.unknownOffset).Bytes() = b } - } else { + case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsBType: + // Handle as *[]byte. + mi.getUnknown = func(p pointer) pref.RawFields { + if p.IsNil() { + return nil + } + bp := p.Apply(mi.unknownOffset).BytesPtr() + if *bp == nil { + return nil + } + return **bp + } + mi.setUnknown = func(p pointer, b pref.RawFields) { + if p.IsNil() { + panic("invalid SetUnknown on nil Message") + } + bp := p.Apply(mi.unknownOffset).BytesPtr() + if *bp == nil { + *bp = new([]byte) + } + **bp = b + } + default: mi.getUnknown = func(pointer) pref.RawFields { return nil } @@ -139,6 +178,58 @@ func (mi *MessageInfo) makeExtensionFieldsFunc(t reflect.Type, si structInfo) { } } } +func (mi *MessageInfo) makeFieldTypes(si structInfo) { + md := mi.Desc + fds := md.Fields() + for i := 0; i < fds.Len(); i++ { + var ft reflect.Type + fd := fds.Get(i) + fs := si.fieldsByNumber[fd.Number()] + isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() + if isOneof { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } + var isMessage bool + switch { + case fs.Type == nil: + continue // never occurs for officially generated message types + case isOneof: + if fd.Enum() != nil || fd.Message() != nil { + ft = si.oneofWrappersByNumber[fd.Number()].Field(0).Type + } + case fd.IsMap(): + if fd.MapValue().Enum() != nil || fd.MapValue().Message() != nil { + ft = fs.Type.Elem() + } + isMessage = fd.MapValue().Message() != nil + case fd.IsList(): + if fd.Enum() != nil || fd.Message() != nil { + ft = fs.Type.Elem() + } + isMessage = fd.Message() != nil + case fd.Enum() != nil: + ft = fs.Type + if fd.HasPresence() && ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + case fd.Message() != nil: + ft = fs.Type + if fd.IsWeak() { + ft = nil + } + isMessage = true + } + if isMessage && ft != nil && ft.Kind() != reflect.Ptr { + ft = reflect.PtrTo(ft) // never occurs for officially generated message types + } + if ft != nil { + if mi.fieldTypes == nil { + mi.fieldTypes = make(map[pref.FieldNumber]interface{}) + } + mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface() + } + } +} type extensionMap map[int32]ExtensionField @@ -306,7 +397,6 @@ var ( // pointer to a named Go struct. If the provided type has a ProtoReflect method, // it must be implemented by calling this method. func (mi *MessageInfo) MessageOf(m interface{}) pref.Message { - // TODO: Switch the input to be an opaque Pointer. 
if reflect.TypeOf(m) != mi.GoReflectType { panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) } @@ -320,6 +410,17 @@ func (mi *MessageInfo) MessageOf(m interface{}) pref.Message { func (m *messageReflectWrapper) pointer() pointer { return m.p } func (m *messageReflectWrapper) messageInfo() *MessageInfo { return m.mi } +// Reset implements the v1 proto.Message.Reset method. +func (m *messageIfaceWrapper) Reset() { + if mr, ok := m.protoUnwrap().(interface{ Reset() }); ok { + mr.Reset() + return + } + rv := reflect.ValueOf(m.protoUnwrap()) + if rv.Kind() == reflect.Ptr && !rv.IsNil() { + rv.Elem().Set(reflect.Zero(rv.Type().Elem())) + } +} func (m *messageIfaceWrapper) ProtoReflect() pref.Message { return (*messageReflectWrapper)(m) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 23124a86e40..343cf872197 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -28,6 +28,39 @@ type fieldInfo struct { newField func() pref.Value } +func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo { + // This never occurs for generated message types. + // It implies that a hand-crafted type has missing Go fields + // for specific protobuf message fields. + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + return false + }, + clear: func(p pointer) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + get: func(p pointer) pref.Value { + return fd.Default() + }, + set: func(p pointer, v pref.Value) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + mutable: func(p pointer) pref.Value { + panic("missing Go struct field for " + string(fd.FullName())) + }, + newMessage: func() pref.Message { + panic("missing Go struct field for " + string(fd.FullName())) + }, + newField: func() pref.Value { + if v := fd.Default(); v.IsValid() { + return v + } + panic("missing Go struct field for " + string(fd.FullName())) + }, + } +} + func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo { ft := fs.Type if ft.Kind() != reflect.Interface { @@ -97,7 +130,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export rv.Set(reflect.New(ot)) } rv = rv.Elem().Elem().Field(0) - if rv.IsNil() { + if rv.Kind() == reflect.Ptr && rv.IsNil() { rv.Set(conv.GoValueOf(pref.ValueOfMessage(conv.New().Message()))) } return conv.PBValueOf(rv) @@ -225,7 +258,10 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 if nullable { if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice { - panic(fmt.Sprintf("field %v has invalid type: got %v, want pointer", fd.FullName(), ft)) + // This never occurs for generated message types. + // Despite the protobuf type system specifying presence, + // the Go field type cannot represent it. 
+ nullable = false } if ft.Kind() == reflect.Ptr { ft = ft.Elem() @@ -388,6 +424,9 @@ func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x expo return false } rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if fs.Type.Kind() != reflect.Ptr { + return !isZero(rv) + } return !rv.IsNil() }, clear: func(p pointer) { @@ -404,13 +443,13 @@ func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x expo set: func(p pointer, v pref.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(conv.GoValueOf(v)) - if rv.IsNil() { + if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { panic(fmt.Sprintf("field %v has invalid nil pointer", fd.FullName())) } }, mutable: func(p pointer) pref.Value { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - if rv.IsNil() { + if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { rv.Set(conv.GoValueOf(conv.New())) } return conv.PBValueOf(rv) @@ -464,3 +503,41 @@ func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInf } return oi } + +// isZero is identical to reflect.Value.IsZero. +// TODO: Remove this when Go1.13 is the minimally supported Go version. +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return math.Float64bits(v.Float()) == 0 + case reflect.Complex64, reflect.Complex128: + c := v.Complex() + return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if !isZero(v.Index(i)) { + return false + } + } + return true + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + return v.IsNil() + case reflect.String: + return v.Len() == 0 + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if !isZero(v.Field(i)) { + return false + } + } + return true + default: + panic(&reflect.ValueError{"reflect.Value.IsZero", v.Kind()}) + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 67b4ede6705..9e3ed821efb 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -121,6 +121,7 @@ func (p pointer) String() *string { return p.v.Interface().(*string) } func (p pointer) StringPtr() **string { return p.v.Interface().(**string) } func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) } func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) } +func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) } func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) } func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) } func (p pointer) Extensions() *map[int32]ExtensionField { diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 088aa85d483..9ecf23a85bb 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -109,6 +109,7 @@ func (p pointer) 
String() *string { return (*string)(p.p) func (p pointer) StringPtr() **string { return (**string)(p.p) } func (p pointer) StringSlice() *[]string { return (*[]string)(p.p) } func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) } +func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p) } func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) } func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) } func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) } diff --git a/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go b/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go deleted file mode 100644 index a3de1cf3241..00000000000 --- a/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package mapsort provides sorted access to maps. -package mapsort - -import ( - "sort" - - "google.golang.org/protobuf/reflect/protoreflect" -) - -// Range iterates over every map entry in sorted key order, -// calling f for each key and value encountered. -func Range(mapv protoreflect.Map, keyKind protoreflect.Kind, f func(protoreflect.MapKey, protoreflect.Value) bool) { - var keys []protoreflect.MapKey - mapv.Range(func(key protoreflect.MapKey, _ protoreflect.Value) bool { - keys = append(keys, key) - return true - }) - sort.Slice(keys, func(i, j int) bool { - switch keyKind { - case protoreflect.BoolKind: - return !keys[i].Bool() && keys[j].Bool() - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, - protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - return keys[i].Int() < keys[j].Int() - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, - protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - return keys[i].Uint() < keys[j].Uint() - case protoreflect.StringKind: - return keys[i].String() < keys[j].String() - default: - panic("invalid kind: " + keyKind.String()) - } - }) - for _, key := range keys { - if !f(key, mapv.Get(key)) { - break - } - } -} diff --git a/vendor/google.golang.org/protobuf/internal/order/order.go b/vendor/google.golang.org/protobuf/internal/order/order.go new file mode 100644 index 00000000000..2a24953f6a4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/order/order.go @@ -0,0 +1,89 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package order + +import ( + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// FieldOrder specifies the ordering to visit message fields. +// It is a function that reports whether x is ordered before y. +type FieldOrder func(x, y pref.FieldDescriptor) bool + +var ( + // AnyFieldOrder specifies no specific field ordering. + AnyFieldOrder FieldOrder = nil + + // LegacyFieldOrder sorts fields in the same ordering as emitted by + // wire serialization in the github.com/golang/protobuf implementation. + LegacyFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + ox, oy := x.ContainingOneof(), y.ContainingOneof() + inOneof := func(od pref.OneofDescriptor) bool { + return od != nil && !od.IsSynthetic() + } + + // Extension fields sort before non-extension fields. 
+ if x.IsExtension() != y.IsExtension() { + return x.IsExtension() && !y.IsExtension() + } + // Fields not within a oneof sort before those within a oneof. + if inOneof(ox) != inOneof(oy) { + return !inOneof(ox) && inOneof(oy) + } + // Fields in disjoint oneof sets are sorted by declaration index. + if ox != nil && oy != nil && ox != oy { + return ox.Index() < oy.Index() + } + // Fields sorted by field number. + return x.Number() < y.Number() + } + + // NumberFieldOrder sorts fields by their field number. + NumberFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + return x.Number() < y.Number() + } + + // IndexNameFieldOrder sorts non-extension fields before extension fields. + // Non-extensions are sorted according to their declaration index. + // Extensions are sorted according to their full name. + IndexNameFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + // Non-extension fields sort before extension fields. + if x.IsExtension() != y.IsExtension() { + return !x.IsExtension() && y.IsExtension() + } + // Extensions sorted by fullname. + if x.IsExtension() && y.IsExtension() { + return x.FullName() < y.FullName() + } + // Non-extensions sorted by declaration index. + return x.Index() < y.Index() + } +) + +// KeyOrder specifies the ordering to visit map entries. +// It is a function that reports whether x is ordered before y. +type KeyOrder func(x, y pref.MapKey) bool + +var ( + // AnyKeyOrder specifies no specific key ordering. + AnyKeyOrder KeyOrder = nil + + // GenericKeyOrder sorts false before true, numeric keys in ascending order, + // and strings in lexicographical ordering according to UTF-8 codepoints. + GenericKeyOrder KeyOrder = func(x, y pref.MapKey) bool { + switch x.Interface().(type) { + case bool: + return !x.Bool() && y.Bool() + case int32, int64: + return x.Int() < y.Int() + case uint32, uint64: + return x.Uint() < y.Uint() + case string: + return x.String() < y.String() + default: + panic("invalid map key type") + } + } +) diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go new file mode 100644 index 00000000000..c8090e0c547 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/order/range.go @@ -0,0 +1,115 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package order provides ordered access to messages and maps. +package order + +import ( + "sort" + "sync" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type messageField struct { + fd pref.FieldDescriptor + v pref.Value +} + +var messageFieldPool = sync.Pool{ + New: func() interface{} { return new([]messageField) }, +} + +type ( + // FieldRnger is an interface for visiting all fields in a message. + // The protoreflect.Message type implements this interface. + FieldRanger interface{ Range(VisitField) } + // VisitField is called everytime a message field is visited. + VisitField = func(pref.FieldDescriptor, pref.Value) bool +) + +// RangeFields iterates over the fields of fs according to the specified order. +func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) { + if less == nil { + fs.Range(fn) + return + } + + // Obtain a pre-allocated scratch buffer. 
+ p := messageFieldPool.Get().(*[]messageField) + fields := (*p)[:0] + defer func() { + if cap(fields) < 1024 { + *p = fields + messageFieldPool.Put(p) + } + }() + + // Collect all fields in the message and sort them. + fs.Range(func(fd pref.FieldDescriptor, v pref.Value) bool { + fields = append(fields, messageField{fd, v}) + return true + }) + sort.Slice(fields, func(i, j int) bool { + return less(fields[i].fd, fields[j].fd) + }) + + // Visit the fields in the specified ordering. + for _, f := range fields { + if !fn(f.fd, f.v) { + return + } + } +} + +type mapEntry struct { + k pref.MapKey + v pref.Value +} + +var mapEntryPool = sync.Pool{ + New: func() interface{} { return new([]mapEntry) }, +} + +type ( + // EntryRanger is an interface for visiting all fields in a message. + // The protoreflect.Map type implements this interface. + EntryRanger interface{ Range(VisitEntry) } + // VisitEntry is called everytime a map entry is visited. + VisitEntry = func(pref.MapKey, pref.Value) bool +) + +// RangeEntries iterates over the entries of es according to the specified order. +func RangeEntries(es EntryRanger, less KeyOrder, fn VisitEntry) { + if less == nil { + es.Range(fn) + return + } + + // Obtain a pre-allocated scratch buffer. + p := mapEntryPool.Get().(*[]mapEntry) + entries := (*p)[:0] + defer func() { + if cap(entries) < 1024 { + *p = entries + mapEntryPool.Put(p) + } + }() + + // Collect all entries in the map and sort them. + es.Range(func(k pref.MapKey, v pref.Value) bool { + entries = append(entries, mapEntry{k, v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + return less(entries[i].k, entries[j].k) + }) + + // Visit the entries in the specified ordering. + for _, e := range entries { + if !fn(e.k, e.v) { + return + } + } +} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 72cf770b427..5879131da78 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 25 + Minor = 26 Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index 42fc5195e87..49f9b8c88cf 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -45,12 +45,14 @@ type UnmarshalOptions struct { } // Unmarshal parses the wire-format message in b and places the result in m. +// The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m Message) error { _, err := UnmarshalOptions{}.unmarshal(b, m.ProtoReflect()) return err } // Unmarshal parses the wire-format message in b and places the result in m. +// The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { _, err := o.unmarshal(b, m.ProtoReflect()) return err @@ -116,10 +118,10 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) // Parse the tag (field number and wire type). num, wtyp, tagLen := protowire.ConsumeTag(b) if tagLen < 0 { - return protowire.ParseError(tagLen) + return errDecode } if num > protowire.MaxValidNumber { - return errors.New("invalid field number") + return errDecode } // Find the field descriptor for this field number. 
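RangeFields and RangeEntries above share one pattern: borrow a scratch slice from a sync.Pool, collect everything the ranger yields, sort it with the supplied comparator, visit in order, and return small buffers to the pool. A generic, self-contained distillation of that pattern; rangeSorted and the string element type are inventions of this sketch, not the internal code.

package main

import (
	"fmt"
	"sort"
	"sync"
)

var scratchPool = sync.Pool{
	New: func() interface{} { return new([]string) },
}

// rangeSorted visits every element produced by rangeFn in the order defined by
// less, reusing a pooled scratch slice to avoid per-call allocations.
func rangeSorted(rangeFn func(yield func(string) bool), less func(a, b string) bool, visit func(string) bool) {
	p := scratchPool.Get().(*[]string)
	buf := (*p)[:0]
	defer func() {
		if cap(buf) < 1024 { // don't keep unusually large buffers alive
			*p = buf
			scratchPool.Put(p)
		}
	}()

	rangeFn(func(s string) bool { buf = append(buf, s); return true })
	sort.Slice(buf, func(i, j int) bool { return less(buf[i], buf[j]) })
	for _, s := range buf {
		if !visit(s) {
			return
		}
	}
}

func main() {
	data := []string{"zeta", "alpha", "mu"}
	rangeFn := func(yield func(string) bool) {
		for _, s := range data {
			if !yield(s) {
				return
			}
		}
	}
	rangeSorted(rangeFn, func(a, b string) bool { return a < b }, func(s string) bool {
		fmt.Println(s) // alpha, mu, zeta
		return true
	})
}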
@@ -159,7 +161,7 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) } valLen = protowire.ConsumeFieldValue(num, wtyp, b[tagLen:]) if valLen < 0 { - return protowire.ParseError(valLen) + return errDecode } if !o.DiscardUnknown { m.SetUnknown(append(m.GetUnknown(), b[:tagLen+valLen]...)) @@ -194,7 +196,7 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto } b, n = protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } var ( keyField = fd.MapKey() @@ -213,10 +215,10 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto for len(b) > 0 { num, wtyp, n := protowire.ConsumeTag(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } if num > protowire.MaxValidNumber { - return 0, errors.New("invalid field number") + return 0, errDecode } b = b[n:] err = errUnknown @@ -246,7 +248,7 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto if err == errUnknown { n = protowire.ConsumeFieldValue(num, wtyp, b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } } else if err != nil { return 0, err @@ -272,3 +274,5 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto // to the unknown field set of a message. It is never returned from an exported // function. var errUnknown = errors.New("BUG: internal error (unknown)") + +var errDecode = errors.New("cannot parse invalid wire-format data") diff --git a/vendor/google.golang.org/protobuf/proto/decode_gen.go b/vendor/google.golang.org/protobuf/proto/decode_gen.go index d6dc904dccf..301eeb20f82 100644 --- a/vendor/google.golang.org/protobuf/proto/decode_gen.go +++ b/vendor/google.golang.org/protobuf/proto/decode_gen.go @@ -27,7 +27,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfBool(protowire.DecodeBool(v)), n, nil case protoreflect.EnumKind: @@ -36,7 +36,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), n, nil case protoreflect.Int32Kind: @@ -45,7 +45,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt32(int32(v)), n, nil case protoreflect.Sint32Kind: @@ -54,7 +54,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), n, nil case protoreflect.Uint32Kind: @@ -63,7 +63,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfUint32(uint32(v)), n, nil case protoreflect.Int64Kind: @@ -72,7 +72,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) 
+ return val, 0, errDecode } return protoreflect.ValueOfInt64(int64(v)), n, nil case protoreflect.Sint64Kind: @@ -81,7 +81,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), n, nil case protoreflect.Uint64Kind: @@ -90,7 +90,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfUint64(v), n, nil case protoreflect.Sfixed32Kind: @@ -99,7 +99,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt32(int32(v)), n, nil case protoreflect.Fixed32Kind: @@ -108,7 +108,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfUint32(uint32(v)), n, nil case protoreflect.FloatKind: @@ -117,7 +117,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), n, nil case protoreflect.Sfixed64Kind: @@ -126,7 +126,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt64(int64(v)), n, nil case protoreflect.Fixed64Kind: @@ -135,7 +135,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfUint64(v), n, nil case protoreflect.DoubleKind: @@ -144,7 +144,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfFloat64(math.Float64frombits(v)), n, nil case protoreflect.StringKind: @@ -153,7 +153,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } if strs.EnforceUTF8(fd) && !utf8.Valid(v) { return protoreflect.Value{}, 0, errors.InvalidUTF8(string(fd.FullName())) @@ -165,7 +165,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), n, nil case protoreflect.MessageKind: @@ -174,7 +174,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfBytes(v), n, nil case protoreflect.GroupKind: @@ -183,7 +183,7 @@ func (o UnmarshalOptions) 
unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeGroup(fd.Number(), b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfBytes(v), n, nil default: @@ -197,12 +197,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) @@ -214,7 +214,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) return n, nil @@ -222,12 +222,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) @@ -239,7 +239,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) return n, nil @@ -247,12 +247,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt32(int32(v))) @@ -264,7 +264,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) return n, nil @@ -272,12 +272,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) @@ -289,7 +289,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) return n, nil @@ -297,12 +297,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + 
return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfUint32(uint32(v))) @@ -314,7 +314,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) return n, nil @@ -322,12 +322,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt64(int64(v))) @@ -339,7 +339,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) return n, nil @@ -347,12 +347,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) @@ -364,7 +364,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) return n, nil @@ -372,12 +372,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfUint64(v)) @@ -389,7 +389,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfUint64(v)) return n, nil @@ -397,12 +397,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed32(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt32(int32(v))) @@ -414,7 +414,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) return n, nil @@ -422,12 +422,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed32(buf) if n < 0 { - return 0, protowire.ParseError(n) + 
return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfUint32(uint32(v))) @@ -439,7 +439,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) return n, nil @@ -447,12 +447,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed32(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) @@ -464,7 +464,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) return n, nil @@ -472,12 +472,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed64(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt64(int64(v))) @@ -489,7 +489,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) return n, nil @@ -497,12 +497,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed64(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfUint64(v)) @@ -514,7 +514,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfUint64(v)) return n, nil @@ -522,12 +522,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed64(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) @@ -539,7 +539,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) return n, nil @@ -549,7 +549,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } if strs.EnforceUTF8(fd) && !utf8.Valid(v) { return 0, 
errors.InvalidUTF8(string(fd.FullName())) @@ -562,7 +562,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...))) return n, nil @@ -572,7 +572,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } m := list.NewElement() if err := o.unmarshalMessage(v, m.Message()); err != nil { @@ -586,7 +586,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeGroup(fd.Number(), b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } m := list.NewElement() if err := o.unmarshalMessage(v, m.Message()); err != nil { diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go index 7b47a1180e4..d18239c2372 100644 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -5,12 +5,9 @@ package proto import ( - "sort" - "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" - "google.golang.org/protobuf/internal/fieldsort" - "google.golang.org/protobuf/internal/mapsort" + "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/runtime/protoiface" @@ -211,14 +208,15 @@ func (o MarshalOptions) marshalMessageSlow(b []byte, m protoreflect.Message) ([] if messageset.IsMessageSet(m.Descriptor()) { return o.marshalMessageSet(b, m) } - // There are many choices for what order we visit fields in. The default one here - // is chosen for reasonable efficiency and simplicity given the protoreflect API. - // It is not deterministic, since Message.Range does not return fields in any - // defined order. - // - // When using deterministic serialization, we sort the known fields. + fieldOrder := order.AnyFieldOrder + if o.Deterministic { + // TODO: This should use a more natural ordering like NumberFieldOrder, + // but doing so breaks golden tests that make invalid assumption about + // output stability of this implementation. + fieldOrder = order.LegacyFieldOrder + } var err error - o.rangeFields(m, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + order.RangeFields(m, fieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { b, err = o.marshalField(b, fd, v) return err == nil }) @@ -229,27 +227,6 @@ func (o MarshalOptions) marshalMessageSlow(b []byte, m protoreflect.Message) ([] return b, nil } -// rangeFields visits fields in a defined order when deterministic serialization is enabled. 
-func (o MarshalOptions) rangeFields(m protoreflect.Message, f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { - if !o.Deterministic { - m.Range(f) - return - } - var fds []protoreflect.FieldDescriptor - m.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { - fds = append(fds, fd) - return true - }) - sort.Slice(fds, func(a, b int) bool { - return fieldsort.Less(fds[a], fds[b]) - }) - for _, fd := range fds { - if !f(fd, m.Get(fd)) { - break - } - } -} - func (o MarshalOptions) marshalField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { switch { case fd.IsList(): @@ -292,8 +269,12 @@ func (o MarshalOptions) marshalList(b []byte, fd protoreflect.FieldDescriptor, l func (o MarshalOptions) marshalMap(b []byte, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) ([]byte, error) { keyf := fd.MapKey() valf := fd.MapValue() + keyOrder := order.AnyKeyOrder + if o.Deterministic { + keyOrder = order.GenericKeyOrder + } var err error - o.rangeMap(mapv, keyf.Kind(), func(key protoreflect.MapKey, value protoreflect.Value) bool { + order.RangeEntries(mapv, keyOrder, func(key protoreflect.MapKey, value protoreflect.Value) bool { b = protowire.AppendTag(b, fd.Number(), protowire.BytesType) var pos int b, pos = appendSpeculativeLength(b) @@ -312,14 +293,6 @@ func (o MarshalOptions) marshalMap(b []byte, fd protoreflect.FieldDescriptor, ma return b, err } -func (o MarshalOptions) rangeMap(mapv protoreflect.Map, kind protoreflect.Kind, f func(protoreflect.MapKey, protoreflect.Value) bool) { - if !o.Deterministic { - mapv.Range(f) - return - } - mapsort.Range(mapv, kind, f) -} - // When encoding length-prefixed fields, we speculatively set aside some number of bytes // for the length, encode the data, and then encode the length (shifting the data if necessary // to make room). diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go index 10902bd851e..4dba2b96997 100644 --- a/vendor/google.golang.org/protobuf/proto/equal.go +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -111,18 +111,31 @@ func equalList(fd pref.FieldDescriptor, x, y pref.List) bool { // equalValue compares two singular values. 
func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool { - switch { - case fd.Message() != nil: - return equalMessage(x.Message(), y.Message()) - case fd.Kind() == pref.BytesKind: - return bytes.Equal(x.Bytes(), y.Bytes()) - case fd.Kind() == pref.FloatKind, fd.Kind() == pref.DoubleKind: + switch fd.Kind() { + case pref.BoolKind: + return x.Bool() == y.Bool() + case pref.EnumKind: + return x.Enum() == y.Enum() + case pref.Int32Kind, pref.Sint32Kind, + pref.Int64Kind, pref.Sint64Kind, + pref.Sfixed32Kind, pref.Sfixed64Kind: + return x.Int() == y.Int() + case pref.Uint32Kind, pref.Uint64Kind, + pref.Fixed32Kind, pref.Fixed64Kind: + return x.Uint() == y.Uint() + case pref.FloatKind, pref.DoubleKind: fx := x.Float() fy := y.Float() if math.IsNaN(fx) || math.IsNaN(fy) { return math.IsNaN(fx) && math.IsNaN(fy) } return fx == fy + case pref.StringKind: + return x.String() == y.String() + case pref.BytesKind: + return bytes.Equal(x.Bytes(), y.Bytes()) + case pref.MessageKind, pref.GroupKind: + return equalMessage(x.Message(), y.Message()) default: return x.Interface() == y.Interface() } diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go index 1d692c3a8b3..312d5d45c60 100644 --- a/vendor/google.golang.org/protobuf/proto/messageset.go +++ b/vendor/google.golang.org/protobuf/proto/messageset.go @@ -9,6 +9,7 @@ import ( "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -28,8 +29,12 @@ func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]b if !flags.ProtoLegacy { return b, errors.New("no support for message_set_wire_format") } + fieldOrder := order.AnyFieldOrder + if o.Deterministic { + fieldOrder = order.NumberFieldOrder + } var err error - o.rangeFields(m, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + order.RangeFields(m, fieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { b, err = o.marshalMessageSetField(b, fd, v) return err == nil }) diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go index ca14b09c341..1f0d183b102 100644 --- a/vendor/google.golang.org/protobuf/proto/proto.go +++ b/vendor/google.golang.org/protobuf/proto/proto.go @@ -32,3 +32,12 @@ var Error error func init() { Error = errors.Error } + +// MessageName returns the full name of m. +// If m is nil, it returns an empty string. +func MessageName(m Message) protoreflect.FullName { + if m == nil { + return "" + } + return m.ProtoReflect().Descriptor().FullName() +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go new file mode 100644 index 00000000000..e4dfb120506 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -0,0 +1,276 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protodesc provides functionality for converting +// FileDescriptorProto messages to/from protoreflect.FileDescriptor values. 
+// +// The google.protobuf.FileDescriptorProto is a protobuf message that describes +// the type information for a .proto file in a form that is easily serializable. +// The protoreflect.FileDescriptor is a more structured representation of +// the FileDescriptorProto message where references and remote dependencies +// can be directly followed. +package protodesc + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// Resolver is the resolver used by NewFile to resolve dependencies. +// The enums and messages provided must belong to some parent file, +// which is also registered. +// +// It is implemented by protoregistry.Files. +type Resolver interface { + FindFileByPath(string) (protoreflect.FileDescriptor, error) + FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) +} + +// FileOptions configures the construction of file descriptors. +type FileOptions struct { + pragma.NoUnkeyedLiterals + + // AllowUnresolvable configures New to permissively allow unresolvable + // file, enum, or message dependencies. Unresolved dependencies are replaced + // by placeholder equivalents. + // + // The following dependencies may be left unresolved: + // • Resolving an imported file. + // • Resolving the type for a message field or extension field. + // If the kind of the field is unknown, then a placeholder is used for both + // the Enum and Message accessors on the protoreflect.FieldDescriptor. + // • Resolving an enum value set as the default for an optional enum field. + // If unresolvable, the protoreflect.FieldDescriptor.Default is set to the + // first value in the associated enum (or zero if the also enum dependency + // is also unresolvable). The protoreflect.FieldDescriptor.DefaultEnumValue + // is populated with a placeholder. + // • Resolving the extended message type for an extension field. + // • Resolving the input or output message type for a service method. + // + // If the unresolved dependency uses a relative name, + // then the placeholder will contain an invalid FullName with a "*." prefix, + // indicating that the starting prefix of the full name is unknown. + AllowUnresolvable bool +} + +// NewFile creates a new protoreflect.FileDescriptor from the provided +// file descriptor message. See FileOptions.New for more information. +func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { + return FileOptions{}.New(fd, r) +} + +// NewFiles creates a new protoregistry.Files from the provided +// FileDescriptorSet message. See FileOptions.NewFiles for more information. +func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { + return FileOptions{}.NewFiles(fd) +} + +// New creates a new protoreflect.FileDescriptor from the provided +// file descriptor message. The file must represent a valid proto file according +// to protobuf semantics. The returned descriptor is a deep copy of the input. +// +// Any imported files, enum types, or message types referenced in the file are +// resolved using the provided registry. When looking up an import file path, +// the path must be unique. 
The newly created file descriptor is not registered +// back into the provided file registry. +func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { + if r == nil { + r = (*protoregistry.Files)(nil) // empty resolver + } + + // Handle the file descriptor content. + f := &filedesc.File{L2: &filedesc.FileL2{}} + switch fd.GetSyntax() { + case "proto2", "": + f.L1.Syntax = protoreflect.Proto2 + case "proto3": + f.L1.Syntax = protoreflect.Proto3 + default: + return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) + } + f.L1.Path = fd.GetName() + if f.L1.Path == "" { + return nil, errors.New("file path must be populated") + } + f.L1.Package = protoreflect.FullName(fd.GetPackage()) + if !f.L1.Package.IsValid() && f.L1.Package != "" { + return nil, errors.New("invalid package: %q", f.L1.Package) + } + if opts := fd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.FileOptions) + f.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + + f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency())) + for _, i := range fd.GetPublicDependency() { + if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsPublic { + return nil, errors.New("invalid or duplicate public import index: %d", i) + } + f.L2.Imports[i].IsPublic = true + } + for _, i := range fd.GetWeakDependency() { + if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsWeak { + return nil, errors.New("invalid or duplicate weak import index: %d", i) + } + f.L2.Imports[i].IsWeak = true + } + imps := importSet{f.Path(): true} + for i, path := range fd.GetDependency() { + imp := &f.L2.Imports[i] + f, err := r.FindFileByPath(path) + if err == protoregistry.NotFound && (o.AllowUnresolvable || imp.IsWeak) { + f = filedesc.PlaceholderFile(path) + } else if err != nil { + return nil, errors.New("could not resolve import %q: %v", path, err) + } + imp.FileDescriptor = f + + if imps[imp.Path()] { + return nil, errors.New("already imported %q", path) + } + imps[imp.Path()] = true + } + for i := range fd.GetDependency() { + imp := &f.L2.Imports[i] + imps.importPublic(imp.Imports()) + } + + // Handle source locations. + f.L2.Locations.File = f + for _, loc := range fd.GetSourceCodeInfo().GetLocation() { + var l protoreflect.SourceLocation + // TODO: Validate that the path points to an actual declaration? + l.Path = protoreflect.SourcePath(loc.GetPath()) + s := loc.GetSpan() + switch len(s) { + case 3: + l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[0]), int(s[2]) + case 4: + l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[2]), int(s[3]) + default: + return nil, errors.New("invalid span: %v", s) + } + // TODO: Validate that the span information is sensible? + // See https://github.com/protocolbuffers/protobuf/issues/6378. + if false && (l.EndLine < l.StartLine || l.StartLine < 0 || l.StartColumn < 0 || l.EndColumn < 0 || + (l.StartLine == l.EndLine && l.EndColumn <= l.StartColumn)) { + return nil, errors.New("invalid span: %v", s) + } + l.LeadingDetachedComments = loc.GetLeadingDetachedComments() + l.LeadingComments = loc.GetLeadingComments() + l.TrailingComments = loc.GetTrailingComments() + f.L2.Locations.List = append(f.L2.Locations.List, l) + } + + // Step 1: Allocate and derive the names for all declarations. 
+ // This copies all fields from the descriptor proto except: + // google.protobuf.FieldDescriptorProto.type_name + // google.protobuf.FieldDescriptorProto.default_value + // google.protobuf.FieldDescriptorProto.oneof_index + // google.protobuf.FieldDescriptorProto.extendee + // google.protobuf.MethodDescriptorProto.input + // google.protobuf.MethodDescriptorProto.output + var err error + sb := new(strs.Builder) + r1 := make(descsByName) + if f.L1.Enums.List, err = r1.initEnumDeclarations(fd.GetEnumType(), f, sb); err != nil { + return nil, err + } + if f.L1.Messages.List, err = r1.initMessagesDeclarations(fd.GetMessageType(), f, sb); err != nil { + return nil, err + } + if f.L1.Extensions.List, err = r1.initExtensionDeclarations(fd.GetExtension(), f, sb); err != nil { + return nil, err + } + if f.L1.Services.List, err = r1.initServiceDeclarations(fd.GetService(), f, sb); err != nil { + return nil, err + } + + // Step 2: Resolve every dependency reference not handled by step 1. + r2 := &resolver{local: r1, remote: r, imports: imps, allowUnresolvable: o.AllowUnresolvable} + if err := r2.resolveMessageDependencies(f.L1.Messages.List, fd.GetMessageType()); err != nil { + return nil, err + } + if err := r2.resolveExtensionDependencies(f.L1.Extensions.List, fd.GetExtension()); err != nil { + return nil, err + } + if err := r2.resolveServiceDependencies(f.L1.Services.List, fd.GetService()); err != nil { + return nil, err + } + + // Step 3: Validate every enum, message, and extension declaration. + if err := validateEnumDeclarations(f.L1.Enums.List, fd.GetEnumType()); err != nil { + return nil, err + } + if err := validateMessageDeclarations(f.L1.Messages.List, fd.GetMessageType()); err != nil { + return nil, err + } + if err := validateExtensionDeclarations(f.L1.Extensions.List, fd.GetExtension()); err != nil { + return nil, err + } + + return f, nil +} + +type importSet map[string]bool + +func (is importSet) importPublic(imps protoreflect.FileImports) { + for i := 0; i < imps.Len(); i++ { + if imp := imps.Get(i); imp.IsPublic { + is[imp.Path()] = true + is.importPublic(imp.Imports()) + } + } +} + +// NewFiles creates a new protoregistry.Files from the provided +// FileDescriptorSet message. The descriptor set must include only +// valid files according to protobuf semantics. The returned descriptors +// are a deep copy of the input. +func (o FileOptions) NewFiles(fds *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { + files := make(map[string]*descriptorpb.FileDescriptorProto) + for _, fd := range fds.File { + if _, ok := files[fd.GetName()]; ok { + return nil, errors.New("file appears multiple times: %q", fd.GetName()) + } + files[fd.GetName()] = fd + } + r := &protoregistry.Files{} + for _, fd := range files { + if err := o.addFileDeps(r, fd, files); err != nil { + return nil, err + } + } + return r, nil +} +func (o FileOptions) addFileDeps(r *protoregistry.Files, fd *descriptorpb.FileDescriptorProto, files map[string]*descriptorpb.FileDescriptorProto) error { + // Set the entry to nil while descending into a file's dependencies to detect cycles. + files[fd.GetName()] = nil + for _, dep := range fd.Dependency { + depfd, ok := files[dep] + if depfd == nil { + if ok { + return errors.New("import cycle in file: %q", dep) + } + continue + } + if err := o.addFileDeps(r, depfd, files); err != nil { + return err + } + } + // Delete the entry once dependencies are processed. 
+ delete(files, fd.GetName()) + f, err := o.New(fd, r) + if err != nil { + return err + } + return r.RegisterFile(f) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go new file mode 100644 index 00000000000..37efda1afe9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -0,0 +1,248 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +type descsByName map[protoreflect.FullName]protoreflect.Descriptor + +func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (es []filedesc.Enum, err error) { + es = make([]filedesc.Enum, len(eds)) // allocate up-front to ensure stable pointers + for i, ed := range eds { + e := &es[i] + e.L2 = new(filedesc.EnumL2) + if e.L0, err = r.makeBase(e, parent, ed.GetName(), i, sb); err != nil { + return nil, err + } + if opts := ed.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.EnumOptions) + e.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + for _, s := range ed.GetReservedName() { + e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s)) + } + for _, rr := range ed.GetReservedRange() { + e.L2.ReservedRanges.List = append(e.L2.ReservedRanges.List, [2]protoreflect.EnumNumber{ + protoreflect.EnumNumber(rr.GetStart()), + protoreflect.EnumNumber(rr.GetEnd()), + }) + } + if e.L2.Values.List, err = r.initEnumValuesFromDescriptorProto(ed.GetValue(), e, sb); err != nil { + return nil, err + } + } + return es, nil +} + +func (r descsByName) initEnumValuesFromDescriptorProto(vds []*descriptorpb.EnumValueDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (vs []filedesc.EnumValue, err error) { + vs = make([]filedesc.EnumValue, len(vds)) // allocate up-front to ensure stable pointers + for i, vd := range vds { + v := &vs[i] + if v.L0, err = r.makeBase(v, parent, vd.GetName(), i, sb); err != nil { + return nil, err + } + if opts := vd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.EnumValueOptions) + v.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + v.L1.Number = protoreflect.EnumNumber(vd.GetNumber()) + } + return vs, nil +} + +func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Message, err error) { + ms = make([]filedesc.Message, len(mds)) // allocate up-front to ensure stable pointers + for i, md := range mds { + m := &ms[i] + m.L2 = new(filedesc.MessageL2) + if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { + return nil, err + } + if opts := md.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.MessageOptions) + m.L2.Options = func() protoreflect.ProtoMessage { return opts } + m.L1.IsMapEntry = opts.GetMapEntry() + m.L1.IsMessageSet = opts.GetMessageSetWireFormat() + } + for _, s := range md.GetReservedName() { + m.L2.ReservedNames.List = append(m.L2.ReservedNames.List, 
protoreflect.Name(s)) + } + for _, rr := range md.GetReservedRange() { + m.L2.ReservedRanges.List = append(m.L2.ReservedRanges.List, [2]protoreflect.FieldNumber{ + protoreflect.FieldNumber(rr.GetStart()), + protoreflect.FieldNumber(rr.GetEnd()), + }) + } + for _, xr := range md.GetExtensionRange() { + m.L2.ExtensionRanges.List = append(m.L2.ExtensionRanges.List, [2]protoreflect.FieldNumber{ + protoreflect.FieldNumber(xr.GetStart()), + protoreflect.FieldNumber(xr.GetEnd()), + }) + var optsFunc func() protoreflect.ProtoMessage + if opts := xr.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.ExtensionRangeOptions) + optsFunc = func() protoreflect.ProtoMessage { return opts } + } + m.L2.ExtensionRangeOptions = append(m.L2.ExtensionRangeOptions, optsFunc) + } + if m.L2.Fields.List, err = r.initFieldsFromDescriptorProto(md.GetField(), m, sb); err != nil { + return nil, err + } + if m.L2.Oneofs.List, err = r.initOneofsFromDescriptorProto(md.GetOneofDecl(), m, sb); err != nil { + return nil, err + } + if m.L1.Enums.List, err = r.initEnumDeclarations(md.GetEnumType(), m, sb); err != nil { + return nil, err + } + if m.L1.Messages.List, err = r.initMessagesDeclarations(md.GetNestedType(), m, sb); err != nil { + return nil, err + } + if m.L1.Extensions.List, err = r.initExtensionDeclarations(md.GetExtension(), m, sb); err != nil { + return nil, err + } + } + return ms, nil +} + +func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (fs []filedesc.Field, err error) { + fs = make([]filedesc.Field, len(fds)) // allocate up-front to ensure stable pointers + for i, fd := range fds { + f := &fs[i] + if f.L0, err = r.makeBase(f, parent, fd.GetName(), i, sb); err != nil { + return nil, err + } + f.L1.IsProto3Optional = fd.GetProto3Optional() + if opts := fd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.FieldOptions) + f.L1.Options = func() protoreflect.ProtoMessage { return opts } + f.L1.IsWeak = opts.GetWeak() + f.L1.HasPacked = opts.Packed != nil + f.L1.IsPacked = opts.GetPacked() + } + f.L1.Number = protoreflect.FieldNumber(fd.GetNumber()) + f.L1.Cardinality = protoreflect.Cardinality(fd.GetLabel()) + if fd.Type != nil { + f.L1.Kind = protoreflect.Kind(fd.GetType()) + } + if fd.JsonName != nil { + f.L1.StringName.InitJSON(fd.GetJsonName()) + } + } + return fs, nil +} + +func (r descsByName) initOneofsFromDescriptorProto(ods []*descriptorpb.OneofDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (os []filedesc.Oneof, err error) { + os = make([]filedesc.Oneof, len(ods)) // allocate up-front to ensure stable pointers + for i, od := range ods { + o := &os[i] + if o.L0, err = r.makeBase(o, parent, od.GetName(), i, sb); err != nil { + return nil, err + } + if opts := od.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.OneofOptions) + o.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + } + return os, nil +} + +func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (xs []filedesc.Extension, err error) { + xs = make([]filedesc.Extension, len(xds)) // allocate up-front to ensure stable pointers + for i, xd := range xds { + x := &xs[i] + x.L2 = new(filedesc.ExtensionL2) + if x.L0, err = r.makeBase(x, parent, xd.GetName(), i, sb); err != nil { + return nil, err + } + if opts := xd.GetOptions(); opts != nil { + opts = 
proto.Clone(opts).(*descriptorpb.FieldOptions) + x.L2.Options = func() protoreflect.ProtoMessage { return opts } + x.L2.IsPacked = opts.GetPacked() + } + x.L1.Number = protoreflect.FieldNumber(xd.GetNumber()) + x.L1.Cardinality = protoreflect.Cardinality(xd.GetLabel()) + if xd.Type != nil { + x.L1.Kind = protoreflect.Kind(xd.GetType()) + } + if xd.JsonName != nil { + x.L2.StringName.InitJSON(xd.GetJsonName()) + } + } + return xs, nil +} + +func (r descsByName) initServiceDeclarations(sds []*descriptorpb.ServiceDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ss []filedesc.Service, err error) { + ss = make([]filedesc.Service, len(sds)) // allocate up-front to ensure stable pointers + for i, sd := range sds { + s := &ss[i] + s.L2 = new(filedesc.ServiceL2) + if s.L0, err = r.makeBase(s, parent, sd.GetName(), i, sb); err != nil { + return nil, err + } + if opts := sd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.ServiceOptions) + s.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + if s.L2.Methods.List, err = r.initMethodsFromDescriptorProto(sd.GetMethod(), s, sb); err != nil { + return nil, err + } + } + return ss, nil +} + +func (r descsByName) initMethodsFromDescriptorProto(mds []*descriptorpb.MethodDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Method, err error) { + ms = make([]filedesc.Method, len(mds)) // allocate up-front to ensure stable pointers + for i, md := range mds { + m := &ms[i] + if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { + return nil, err + } + if opts := md.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.MethodOptions) + m.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + m.L1.IsStreamingClient = md.GetClientStreaming() + m.L1.IsStreamingServer = md.GetServerStreaming() + } + return ms, nil +} + +func (r descsByName) makeBase(child, parent protoreflect.Descriptor, name string, idx int, sb *strs.Builder) (filedesc.BaseL0, error) { + if !protoreflect.Name(name).IsValid() { + return filedesc.BaseL0{}, errors.New("descriptor %q has an invalid nested name: %q", parent.FullName(), name) + } + + // Derive the full name of the child. + // Note that enum values are a sibling to the enum parent in the namespace. + var fullName protoreflect.FullName + if _, ok := parent.(protoreflect.EnumDescriptor); ok { + fullName = sb.AppendFullName(parent.FullName().Parent(), protoreflect.Name(name)) + } else { + fullName = sb.AppendFullName(parent.FullName(), protoreflect.Name(name)) + } + if _, ok := r[fullName]; ok { + return filedesc.BaseL0{}, errors.New("descriptor %q already declared", fullName) + } + r[fullName] = child + + // TODO: Verify that the full name does not already exist in the resolver? + // This is not as critical since most usages of NewFile will register + // the created file back into the registry, which will perform this check. + + return filedesc.BaseL0{ + FullName: fullName, + ParentFile: parent.ParentFile().(*filedesc.File), + Parent: parent, + Index: idx, + }, nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go new file mode 100644 index 00000000000..cebb36cdade --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -0,0 +1,286 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// resolver is a wrapper around a local registry of declarations within the file +// and the remote resolver. The remote resolver is restricted to only return +// descriptors that have been imported. +type resolver struct { + local descsByName + remote Resolver + imports importSet + + allowUnresolvable bool +} + +func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) (err error) { + for i, md := range mds { + m := &ms[i] + for j, fd := range md.GetField() { + f := &m.L2.Fields.List[j] + if f.L1.Cardinality == protoreflect.Required { + m.L2.RequiredNumbers.List = append(m.L2.RequiredNumbers.List, f.L1.Number) + } + if fd.OneofIndex != nil { + k := int(fd.GetOneofIndex()) + if !(0 <= k && k < len(md.GetOneofDecl())) { + return errors.New("message field %q has an invalid oneof index: %d", f.FullName(), k) + } + o := &m.L2.Oneofs.List[k] + f.L1.ContainingOneof = o + o.L1.Fields.List = append(o.L1.Fields.List, f) + } + + if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { + return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) + } + if fd.DefaultValue != nil { + v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable) + if err != nil { + return errors.New("message field %q has invalid default: %v", f.FullName(), err) + } + f.L1.Default = filedesc.DefaultValue(v, ev) + } + } + + if err := r.resolveMessageDependencies(m.L1.Messages.List, md.GetNestedType()); err != nil { + return err + } + if err := r.resolveExtensionDependencies(m.L1.Extensions.List, md.GetExtension()); err != nil { + return err + } + } + return nil +} + +func (r *resolver) resolveExtensionDependencies(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) (err error) { + for i, xd := range xds { + x := &xs[i] + if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee()), false); err != nil { + return errors.New("extension field %q cannot resolve extendee: %v", x.FullName(), err) + } + if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName()), false); err != nil { + return errors.New("extension field %q cannot resolve type: %v", x.FullName(), err) + } + if xd.DefaultValue != nil { + v, ev, err := unmarshalDefault(xd.GetDefaultValue(), x, r.allowUnresolvable) + if err != nil { + return errors.New("extension field %q has invalid default: %v", x.FullName(), err) + } + x.L2.Default = filedesc.DefaultValue(v, ev) + } + } + return nil +} + +func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*descriptorpb.ServiceDescriptorProto) (err error) { + for i, sd := range sds { + s := &ss[i] + for j, md := range sd.GetMethod() { + m := &s.L2.Methods.List[j] + m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()), false) + if err != nil { + return errors.New("service method %q cannot resolve input: %v", m.FullName(), err) + } + 
m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()), false) + if err != nil { + return errors.New("service method %q cannot resolve output: %v", m.FullName(), err) + } + } + } + return nil +} + +// findTarget finds an enum or message descriptor if k is an enum, message, +// group, or unknown. If unknown, and the name could be resolved, the kind +// returned kind is set based on the type of the resolved descriptor. +func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) { + switch k { + case protoreflect.EnumKind: + ed, err := r.findEnumDescriptor(scope, ref, isWeak) + if err != nil { + return 0, nil, nil, err + } + return k, ed, nil, nil + case protoreflect.MessageKind, protoreflect.GroupKind: + md, err := r.findMessageDescriptor(scope, ref, isWeak) + if err != nil { + return 0, nil, nil, err + } + return k, nil, md, nil + case 0: + // Handle unspecified kinds (possible with parsers that operate + // on a per-file basis without knowledge of dependencies). + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return k, filedesc.PlaceholderEnum(ref.FullName()), filedesc.PlaceholderMessage(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return 0, nil, nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return 0, nil, nil, err + } + switch d := d.(type) { + case protoreflect.EnumDescriptor: + return protoreflect.EnumKind, d, nil, nil + case protoreflect.MessageDescriptor: + return protoreflect.MessageKind, nil, d, nil + default: + return 0, nil, nil, errors.New("unknown kind") + } + default: + if ref != "" { + return 0, nil, nil, errors.New("target name cannot be specified for %v", k) + } + if !k.IsValid() { + return 0, nil, nil, errors.New("invalid kind: %d", k) + } + return k, nil, nil, nil + } +} + +// findDescriptor finds the descriptor by name, +// which may be a relative name within some scope. +// +// Suppose the scope was "fizz.buzz" and the reference was "Foo.Bar", +// then the following full names are searched: +// * fizz.buzz.Foo.Bar +// * fizz.Foo.Bar +// * Foo.Bar +func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.Descriptor, error) { + if !ref.IsValid() { + return nil, errors.New("invalid name reference: %q", ref) + } + if ref.IsFull() { + scope, ref = "", ref[1:] + } + var foundButNotImported protoreflect.Descriptor + for { + // Derive the full name to search. + s := protoreflect.FullName(ref) + if scope != "" { + s = scope + "." + s + } + + // Check the current file for the descriptor. + if d, ok := r.local[s]; ok { + return d, nil + } + + // Check the remote registry for the descriptor. + d, err := r.remote.FindDescriptorByName(s) + if err == nil { + // Only allow descriptors covered by one of the imports. + if r.imports[d.ParentFile().Path()] { + return d, nil + } + foundButNotImported = d + } else if err != protoregistry.NotFound { + return nil, errors.Wrap(err, "%q", s) + } + + // Continue on at a higher level of scoping. 
+ if scope == "" { + if d := foundButNotImported; d != nil { + return nil, errors.New("resolved %q, but %q is not imported", d.FullName(), d.ParentFile().Path()) + } + return nil, protoregistry.NotFound + } + scope = scope.Parent() + } +} + +func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.EnumDescriptor, error) { + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return filedesc.PlaceholderEnum(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return nil, err + } + ed, ok := d.(protoreflect.EnumDescriptor) + if !ok { + return nil, errors.New("resolved %q, but it is not an enum", d.FullName()) + } + return ed, nil +} + +func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.MessageDescriptor, error) { + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return filedesc.PlaceholderMessage(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return nil, err + } + md, ok := d.(protoreflect.MessageDescriptor) + if !ok { + return nil, errors.New("resolved %q, but it is not an message", d.FullName()) + } + return md, nil +} + +// partialName is the partial name. A leading dot means that the name is full, +// otherwise the name is relative to some current scope. +// See google.protobuf.FieldDescriptorProto.type_name. +type partialName string + +func (s partialName) IsFull() bool { + return len(s) > 0 && s[0] == '.' +} + +func (s partialName) IsValid() bool { + if s.IsFull() { + return protoreflect.FullName(s[1:]).IsValid() + } + return protoreflect.FullName(s).IsValid() +} + +const unknownPrefix = "*." + +// FullName converts the partial name to a full name on a best-effort basis. +// If relative, it creates an invalid full name, using a "*." prefix +// to indicate that the start of the full name is unknown. 
+func (s partialName) FullName() protoreflect.FullName { + if s.IsFull() { + return protoreflect.FullName(s[1:]) + } + return protoreflect.FullName(unknownPrefix + s) +} + +func unmarshalDefault(s string, fd protoreflect.FieldDescriptor, allowUnresolvable bool) (protoreflect.Value, protoreflect.EnumValueDescriptor, error) { + var evs protoreflect.EnumValueDescriptors + if fd.Enum() != nil { + evs = fd.Enum().Values() + } + v, ev, err := defval.Unmarshal(s, fd.Kind(), evs, defval.Descriptor) + if err != nil && allowUnresolvable && evs != nil && protoreflect.Name(s).IsValid() { + v = protoreflect.ValueOfEnum(0) + if evs.Len() > 0 { + v = protoreflect.ValueOfEnum(evs.Get(0).Number()) + } + ev = filedesc.PlaceholderEnumValue(fd.Enum().FullName().Parent().Append(protoreflect.Name(s))) + } else if err != nil { + return v, ev, err + } + if fd.Syntax() == protoreflect.Proto3 { + return v, ev, errors.New("cannot be specified under proto3 semantics") + } + if fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind || fd.Cardinality() == protoreflect.Repeated { + return v, ev, errors.New("cannot be specified on composite types") + } + return v, ev, nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go new file mode 100644 index 00000000000..9af1d56487a --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -0,0 +1,374 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "strings" + "unicode" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescriptorProto) error { + for i, ed := range eds { + e := &es[i] + if err := e.L2.ReservedNames.CheckValid(); err != nil { + return errors.New("enum %q reserved names has %v", e.FullName(), err) + } + if err := e.L2.ReservedRanges.CheckValid(); err != nil { + return errors.New("enum %q reserved ranges has %v", e.FullName(), err) + } + if len(ed.GetValue()) == 0 { + return errors.New("enum %q must contain at least one value declaration", e.FullName()) + } + allowAlias := ed.GetOptions().GetAllowAlias() + foundAlias := false + for i := 0; i < e.Values().Len(); i++ { + v1 := e.Values().Get(i) + if v2 := e.Values().ByNumber(v1.Number()); v1 != v2 { + foundAlias = true + if !allowAlias { + return errors.New("enum %q has conflicting non-aliased values on number %d: %q with %q", e.FullName(), v1.Number(), v1.Name(), v2.Name()) + } + } + } + if allowAlias && !foundAlias { + return errors.New("enum %q allows aliases, but none were found", e.FullName()) + } + if e.Syntax() == protoreflect.Proto3 { + if v := e.Values().Get(0); v.Number() != 0 { + return errors.New("enum %q using proto3 semantics must have zero number for the first value", v.FullName()) + } + // Verify that value names in proto3 do not conflict if the + // case-insensitive prefix is removed. 
+ // See protoc v3.8.0: src/google/protobuf/descriptor.cc:4991-5055 + names := map[string]protoreflect.EnumValueDescriptor{} + prefix := strings.Replace(strings.ToLower(string(e.Name())), "_", "", -1) + for i := 0; i < e.Values().Len(); i++ { + v1 := e.Values().Get(i) + s := strs.EnumValueName(strs.TrimEnumPrefix(string(v1.Name()), prefix)) + if v2, ok := names[s]; ok && v1.Number() != v2.Number() { + return errors.New("enum %q using proto3 semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) + } + names[s] = v1 + } + } + + for j, vd := range ed.GetValue() { + v := &e.L2.Values.List[j] + if vd.Number == nil { + return errors.New("enum value %q must have a specified number", v.FullName()) + } + if e.L2.ReservedNames.Has(v.Name()) { + return errors.New("enum value %q must not use reserved name", v.FullName()) + } + if e.L2.ReservedRanges.Has(v.Number()) { + return errors.New("enum value %q must not use reserved number %d", v.FullName(), v.Number()) + } + } + } + return nil +} + +func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { + for i, md := range mds { + m := &ms[i] + + // Handle the message descriptor itself. + isMessageSet := md.GetOptions().GetMessageSetWireFormat() + if err := m.L2.ReservedNames.CheckValid(); err != nil { + return errors.New("message %q reserved names has %v", m.FullName(), err) + } + if err := m.L2.ReservedRanges.CheckValid(isMessageSet); err != nil { + return errors.New("message %q reserved ranges has %v", m.FullName(), err) + } + if err := m.L2.ExtensionRanges.CheckValid(isMessageSet); err != nil { + return errors.New("message %q extension ranges has %v", m.FullName(), err) + } + if err := (*filedesc.FieldRanges).CheckOverlap(&m.L2.ReservedRanges, &m.L2.ExtensionRanges); err != nil { + return errors.New("message %q reserved and extension ranges has %v", m.FullName(), err) + } + for i := 0; i < m.Fields().Len(); i++ { + f1 := m.Fields().Get(i) + if f2 := m.Fields().ByNumber(f1.Number()); f1 != f2 { + return errors.New("message %q has conflicting fields: %q with %q", m.FullName(), f1.Name(), f2.Name()) + } + } + if isMessageSet && !flags.ProtoLegacy { + return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName()) + } + if isMessageSet && (m.Syntax() != protoreflect.Proto2 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { + return errors.New("message %q is an invalid proto1 MessageSet", m.FullName()) + } + if m.Syntax() == protoreflect.Proto3 { + if m.ExtensionRanges().Len() > 0 { + return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName()) + } + // Verify that field names in proto3 do not conflict if lowercased + // with all underscores removed. 
+ // See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847 + names := map[string]protoreflect.FieldDescriptor{} + for i := 0; i < m.Fields().Len(); i++ { + f1 := m.Fields().Get(i) + s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1) + if f2, ok := names[s]; ok { + return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name()) + } + names[s] = f1 + } + } + + for j, fd := range md.GetField() { + f := &m.L2.Fields.List[j] + if m.L2.ReservedNames.Has(f.Name()) { + return errors.New("message field %q must not use reserved name", f.FullName()) + } + if !f.Number().IsValid() { + return errors.New("message field %q has an invalid number: %d", f.FullName(), f.Number()) + } + if !f.Cardinality().IsValid() { + return errors.New("message field %q has an invalid cardinality: %d", f.FullName(), f.Cardinality()) + } + if m.L2.ReservedRanges.Has(f.Number()) { + return errors.New("message field %q must not use reserved number %d", f.FullName(), f.Number()) + } + if m.L2.ExtensionRanges.Has(f.Number()) { + return errors.New("message field %q with number %d in extension range", f.FullName(), f.Number()) + } + if fd.Extendee != nil { + return errors.New("message field %q may not have extendee: %q", f.FullName(), fd.GetExtendee()) + } + if f.L1.IsProto3Optional { + if f.Syntax() != protoreflect.Proto3 { + return errors.New("message field %q under proto3 optional semantics must be specified in the proto3 syntax", f.FullName()) + } + if f.Cardinality() != protoreflect.Optional { + return errors.New("message field %q under proto3 optional semantics must have optional cardinality", f.FullName()) + } + if f.ContainingOneof() != nil && f.ContainingOneof().Fields().Len() != 1 { + return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName()) + } + } + if f.IsWeak() && !flags.ProtoLegacy { + return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) + } + if f.IsWeak() && (f.Syntax() != protoreflect.Proto2 || !isOptionalMessage(f) || f.ContainingOneof() != nil) { + return errors.New("message field %q may only be weak for an optional message", f.FullName()) + } + if f.IsPacked() && !isPackable(f) { + return errors.New("message field %q is not packable", f.FullName()) + } + if err := checkValidGroup(f); err != nil { + return errors.New("message field %q is an invalid group: %v", f.FullName(), err) + } + if err := checkValidMap(f); err != nil { + return errors.New("message field %q is an invalid map: %v", f.FullName(), err) + } + if f.Syntax() == protoreflect.Proto3 { + if f.Cardinality() == protoreflect.Required { + return errors.New("message field %q using proto3 semantics cannot be required", f.FullName()) + } + if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().Syntax() != protoreflect.Proto3 { + return errors.New("message field %q using proto3 semantics may only depend on a proto3 enum", f.FullName()) + } + } + } + seenSynthetic := false // synthetic oneofs for proto3 optional must come after real oneofs + for j := range md.GetOneofDecl() { + o := &m.L2.Oneofs.List[j] + if o.Fields().Len() == 0 { + return errors.New("message oneof %q must contain at least one field declaration", o.FullName()) + } + if n := o.Fields().Len(); n-1 != (o.Fields().Get(n-1).Index() - o.Fields().Get(0).Index()) { + return errors.New("message oneof %q must have consecutively declared fields", o.FullName()) + } + + if 
o.IsSynthetic() { + seenSynthetic = true + continue + } + if !o.IsSynthetic() && seenSynthetic { + return errors.New("message oneof %q must be declared before synthetic oneofs", o.FullName()) + } + + for i := 0; i < o.Fields().Len(); i++ { + f := o.Fields().Get(i) + if f.Cardinality() != protoreflect.Optional { + return errors.New("message field %q belongs in a oneof and must be optional", f.FullName()) + } + if f.IsWeak() { + return errors.New("message field %q belongs in a oneof and must not be a weak reference", f.FullName()) + } + } + } + + if err := validateEnumDeclarations(m.L1.Enums.List, md.GetEnumType()); err != nil { + return err + } + if err := validateMessageDeclarations(m.L1.Messages.List, md.GetNestedType()); err != nil { + return err + } + if err := validateExtensionDeclarations(m.L1.Extensions.List, md.GetExtension()); err != nil { + return err + } + } + return nil +} + +func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error { + for i, xd := range xds { + x := &xs[i] + // NOTE: Avoid using the IsValid method since extensions to MessageSet + // may have a field number higher than normal. This check only verifies + // that the number is not negative or reserved. We check again later + // if we know that the extendee is definitely not a MessageSet. + if n := x.Number(); n < 0 || (protowire.FirstReservedNumber <= n && n <= protowire.LastReservedNumber) { + return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) + } + if !x.Cardinality().IsValid() || x.Cardinality() == protoreflect.Required { + return errors.New("extension field %q has an invalid cardinality: %d", x.FullName(), x.Cardinality()) + } + if xd.JsonName != nil { + // A bug in older versions of protoc would always populate the + // "json_name" option for extensions when it is meaningless. + // When it did so, it would always use the camel-cased field name. 
+ if xd.GetJsonName() != strs.JSONCamelCase(string(x.Name())) { + return errors.New("extension field %q may not have an explicitly set JSON name: %q", x.FullName(), xd.GetJsonName()) + } + } + if xd.OneofIndex != nil { + return errors.New("extension field %q may not be part of a oneof", x.FullName()) + } + if md := x.ContainingMessage(); !md.IsPlaceholder() { + if !md.ExtensionRanges().Has(x.Number()) { + return errors.New("extension field %q extends %q with non-extension field number: %d", x.FullName(), md.FullName(), x.Number()) + } + isMessageSet := md.Options().(*descriptorpb.MessageOptions).GetMessageSetWireFormat() + if isMessageSet && !isOptionalMessage(x) { + return errors.New("extension field %q extends MessageSet and must be an optional message", x.FullName()) + } + if !isMessageSet && !x.Number().IsValid() { + return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) + } + } + if xd.GetOptions().GetWeak() { + return errors.New("extension field %q cannot be a weak reference", x.FullName()) + } + if x.IsPacked() && !isPackable(x) { + return errors.New("extension field %q is not packable", x.FullName()) + } + if err := checkValidGroup(x); err != nil { + return errors.New("extension field %q is an invalid group: %v", x.FullName(), err) + } + if md := x.Message(); md != nil && md.IsMapEntry() { + return errors.New("extension field %q cannot be a map entry", x.FullName()) + } + if x.Syntax() == protoreflect.Proto3 { + switch x.ContainingMessage().FullName() { + case (*descriptorpb.FileOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.EnumOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.EnumValueOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.MessageOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.FieldOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.OneofOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.ExtensionRangeOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.ServiceOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.MethodOptions)(nil).ProtoReflect().Descriptor().FullName(): + default: + return errors.New("extension field %q cannot be declared in proto3 unless extended descriptor options", x.FullName()) + } + } + } + return nil +} + +// isOptionalMessage reports whether this is an optional message. +// If the kind is unknown, it is assumed to be a message. +func isOptionalMessage(fd protoreflect.FieldDescriptor) bool { + return (fd.Kind() == 0 || fd.Kind() == protoreflect.MessageKind) && fd.Cardinality() == protoreflect.Optional +} + +// isPackable checks whether the pack option can be specified. +func isPackable(fd protoreflect.FieldDescriptor) bool { + switch fd.Kind() { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + return fd.IsList() +} + +// checkValidGroup reports whether fd is a valid group according to the same +// rules that protoc imposes. 
+func checkValidGroup(fd protoreflect.FieldDescriptor) error { + md := fd.Message() + switch { + case fd.Kind() != protoreflect.GroupKind: + return nil + case fd.Syntax() != protoreflect.Proto2: + return errors.New("invalid under proto2 semantics") + case md == nil || md.IsPlaceholder(): + return errors.New("message must be resolvable") + case fd.FullName().Parent() != md.FullName().Parent(): + return errors.New("message and field must be declared in the same scope") + case !unicode.IsUpper(rune(md.Name()[0])): + return errors.New("message name must start with an uppercase") + case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): + return errors.New("field name must be lowercased form of the message name") + } + return nil +} + +// checkValidMap checks whether the field is a valid map according to the same +// rules that protoc imposes. +// See protoc v3.8.0: src/google/protobuf/descriptor.cc:6045-6115 +func checkValidMap(fd protoreflect.FieldDescriptor) error { + md := fd.Message() + switch { + case md == nil || !md.IsMapEntry(): + return nil + case fd.FullName().Parent() != md.FullName().Parent(): + return errors.New("message and field must be declared in the same scope") + case md.Name() != protoreflect.Name(strs.MapEntryName(string(fd.Name()))): + return errors.New("incorrect implicit map entry name") + case fd.Cardinality() != protoreflect.Repeated: + return errors.New("field must be repeated") + case md.Fields().Len() != 2: + return errors.New("message must have exactly two fields") + case md.ExtensionRanges().Len() > 0: + return errors.New("message must not have any extension ranges") + case md.Enums().Len()+md.Messages().Len()+md.Extensions().Len() > 0: + return errors.New("message must not have any nested declarations") + } + kf := md.Fields().Get(0) + vf := md.Fields().Get(1) + switch { + case kf.Name() != genid.MapEntry_Key_field_name || kf.Number() != genid.MapEntry_Key_field_number || kf.Cardinality() != protoreflect.Optional || kf.ContainingOneof() != nil || kf.HasDefault(): + return errors.New("invalid key field") + case vf.Name() != genid.MapEntry_Value_field_name || vf.Number() != genid.MapEntry_Value_field_number || vf.Cardinality() != protoreflect.Optional || vf.ContainingOneof() != nil || vf.HasDefault(): + return errors.New("invalid value field") + } + switch kf.Kind() { + case protoreflect.BoolKind: // bool + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: // int32 + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: // int64 + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: // uint32 + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: // uint64 + case protoreflect.StringKind: // string + default: + return errors.New("invalid key kind: %v", kf.Kind()) + } + if e := vf.Enum(); e != nil && e.Values().Len() > 0 && e.Values().Get(0).Number() != 0 { + return errors.New("map enum value must have zero number for the first value") + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go new file mode 100644 index 00000000000..a7c5ceffc9b --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -0,0 +1,252 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package protodesc + +import ( + "fmt" + "strings" + + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// ToFileDescriptorProto copies a protoreflect.FileDescriptor into a +// google.protobuf.FileDescriptorProto message. +func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto { + p := &descriptorpb.FileDescriptorProto{ + Name: proto.String(file.Path()), + Options: proto.Clone(file.Options()).(*descriptorpb.FileOptions), + } + if file.Package() != "" { + p.Package = proto.String(string(file.Package())) + } + for i, imports := 0, file.Imports(); i < imports.Len(); i++ { + imp := imports.Get(i) + p.Dependency = append(p.Dependency, imp.Path()) + if imp.IsPublic { + p.PublicDependency = append(p.PublicDependency, int32(i)) + } + if imp.IsWeak { + p.WeakDependency = append(p.WeakDependency, int32(i)) + } + } + for i, locs := 0, file.SourceLocations(); i < locs.Len(); i++ { + loc := locs.Get(i) + l := &descriptorpb.SourceCodeInfo_Location{} + l.Path = append(l.Path, loc.Path...) + if loc.StartLine == loc.EndLine { + l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndColumn)} + } else { + l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndLine), int32(loc.EndColumn)} + } + l.LeadingDetachedComments = append([]string(nil), loc.LeadingDetachedComments...) + if loc.LeadingComments != "" { + l.LeadingComments = proto.String(loc.LeadingComments) + } + if loc.TrailingComments != "" { + l.TrailingComments = proto.String(loc.TrailingComments) + } + if p.SourceCodeInfo == nil { + p.SourceCodeInfo = &descriptorpb.SourceCodeInfo{} + } + p.SourceCodeInfo.Location = append(p.SourceCodeInfo.Location, l) + + } + for i, messages := 0, file.Messages(); i < messages.Len(); i++ { + p.MessageType = append(p.MessageType, ToDescriptorProto(messages.Get(i))) + } + for i, enums := 0, file.Enums(); i < enums.Len(); i++ { + p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i))) + } + for i, services := 0, file.Services(); i < services.Len(); i++ { + p.Service = append(p.Service, ToServiceDescriptorProto(services.Get(i))) + } + for i, exts := 0, file.Extensions(); i < exts.Len(); i++ { + p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) + } + if syntax := file.Syntax(); syntax != protoreflect.Proto2 { + p.Syntax = proto.String(file.Syntax().String()) + } + return p +} + +// ToDescriptorProto copies a protoreflect.MessageDescriptor into a +// google.protobuf.DescriptorProto message. 
+func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto { + p := &descriptorpb.DescriptorProto{ + Name: proto.String(string(message.Name())), + Options: proto.Clone(message.Options()).(*descriptorpb.MessageOptions), + } + for i, fields := 0, message.Fields(); i < fields.Len(); i++ { + p.Field = append(p.Field, ToFieldDescriptorProto(fields.Get(i))) + } + for i, exts := 0, message.Extensions(); i < exts.Len(); i++ { + p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) + } + for i, messages := 0, message.Messages(); i < messages.Len(); i++ { + p.NestedType = append(p.NestedType, ToDescriptorProto(messages.Get(i))) + } + for i, enums := 0, message.Enums(); i < enums.Len(); i++ { + p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i))) + } + for i, xranges := 0, message.ExtensionRanges(); i < xranges.Len(); i++ { + xrange := xranges.Get(i) + p.ExtensionRange = append(p.ExtensionRange, &descriptorpb.DescriptorProto_ExtensionRange{ + Start: proto.Int32(int32(xrange[0])), + End: proto.Int32(int32(xrange[1])), + Options: proto.Clone(message.ExtensionRangeOptions(i)).(*descriptorpb.ExtensionRangeOptions), + }) + } + for i, oneofs := 0, message.Oneofs(); i < oneofs.Len(); i++ { + p.OneofDecl = append(p.OneofDecl, ToOneofDescriptorProto(oneofs.Get(i))) + } + for i, ranges := 0, message.ReservedRanges(); i < ranges.Len(); i++ { + rrange := ranges.Get(i) + p.ReservedRange = append(p.ReservedRange, &descriptorpb.DescriptorProto_ReservedRange{ + Start: proto.Int32(int32(rrange[0])), + End: proto.Int32(int32(rrange[1])), + }) + } + for i, names := 0, message.ReservedNames(); i < names.Len(); i++ { + p.ReservedName = append(p.ReservedName, string(names.Get(i))) + } + return p +} + +// ToFieldDescriptorProto copies a protoreflect.FieldDescriptor into a +// google.protobuf.FieldDescriptorProto message. +func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto { + p := &descriptorpb.FieldDescriptorProto{ + Name: proto.String(string(field.Name())), + Number: proto.Int32(int32(field.Number())), + Label: descriptorpb.FieldDescriptorProto_Label(field.Cardinality()).Enum(), + Options: proto.Clone(field.Options()).(*descriptorpb.FieldOptions), + } + if field.IsExtension() { + p.Extendee = fullNameOf(field.ContainingMessage()) + } + if field.Kind().IsValid() { + p.Type = descriptorpb.FieldDescriptorProto_Type(field.Kind()).Enum() + } + if field.Enum() != nil { + p.TypeName = fullNameOf(field.Enum()) + } + if field.Message() != nil { + p.TypeName = fullNameOf(field.Message()) + } + if field.HasJSONName() { + // A bug in older versions of protoc would always populate the + // "json_name" option for extensions when it is meaningless. + // When it did so, it would always use the camel-cased field name. 
+ if field.IsExtension() { + p.JsonName = proto.String(strs.JSONCamelCase(string(field.Name()))) + } else { + p.JsonName = proto.String(field.JSONName()) + } + } + if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() { + p.Proto3Optional = proto.Bool(true) + } + if field.HasDefault() { + def, err := defval.Marshal(field.Default(), field.DefaultEnumValue(), field.Kind(), defval.Descriptor) + if err != nil && field.DefaultEnumValue() != nil { + def = string(field.DefaultEnumValue().Name()) // occurs for unresolved enum values + } else if err != nil { + panic(fmt.Sprintf("%v: %v", field.FullName(), err)) + } + p.DefaultValue = proto.String(def) + } + if oneof := field.ContainingOneof(); oneof != nil { + p.OneofIndex = proto.Int32(int32(oneof.Index())) + } + return p +} + +// ToOneofDescriptorProto copies a protoreflect.OneofDescriptor into a +// google.protobuf.OneofDescriptorProto message. +func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto { + return &descriptorpb.OneofDescriptorProto{ + Name: proto.String(string(oneof.Name())), + Options: proto.Clone(oneof.Options()).(*descriptorpb.OneofOptions), + } +} + +// ToEnumDescriptorProto copies a protoreflect.EnumDescriptor into a +// google.protobuf.EnumDescriptorProto message. +func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto { + p := &descriptorpb.EnumDescriptorProto{ + Name: proto.String(string(enum.Name())), + Options: proto.Clone(enum.Options()).(*descriptorpb.EnumOptions), + } + for i, values := 0, enum.Values(); i < values.Len(); i++ { + p.Value = append(p.Value, ToEnumValueDescriptorProto(values.Get(i))) + } + for i, ranges := 0, enum.ReservedRanges(); i < ranges.Len(); i++ { + rrange := ranges.Get(i) + p.ReservedRange = append(p.ReservedRange, &descriptorpb.EnumDescriptorProto_EnumReservedRange{ + Start: proto.Int32(int32(rrange[0])), + End: proto.Int32(int32(rrange[1])), + }) + } + for i, names := 0, enum.ReservedNames(); i < names.Len(); i++ { + p.ReservedName = append(p.ReservedName, string(names.Get(i))) + } + return p +} + +// ToEnumValueDescriptorProto copies a protoreflect.EnumValueDescriptor into a +// google.protobuf.EnumValueDescriptorProto message. +func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto { + return &descriptorpb.EnumValueDescriptorProto{ + Name: proto.String(string(value.Name())), + Number: proto.Int32(int32(value.Number())), + Options: proto.Clone(value.Options()).(*descriptorpb.EnumValueOptions), + } +} + +// ToServiceDescriptorProto copies a protoreflect.ServiceDescriptor into a +// google.protobuf.ServiceDescriptorProto message. +func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto { + p := &descriptorpb.ServiceDescriptorProto{ + Name: proto.String(string(service.Name())), + Options: proto.Clone(service.Options()).(*descriptorpb.ServiceOptions), + } + for i, methods := 0, service.Methods(); i < methods.Len(); i++ { + p.Method = append(p.Method, ToMethodDescriptorProto(methods.Get(i))) + } + return p +} + +// ToMethodDescriptorProto copies a protoreflect.MethodDescriptor into a +// google.protobuf.MethodDescriptorProto message. 
+func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto { + p := &descriptorpb.MethodDescriptorProto{ + Name: proto.String(string(method.Name())), + InputType: fullNameOf(method.Input()), + OutputType: fullNameOf(method.Output()), + Options: proto.Clone(method.Options()).(*descriptorpb.MethodOptions), + } + if method.IsStreamingClient() { + p.ClientStreaming = proto.Bool(true) + } + if method.IsStreamingServer() { + p.ServerStreaming = proto.Bool(true) + } + return p +} + +func fullNameOf(d protoreflect.Descriptor) *string { + if d == nil { + return nil + } + if strings.HasPrefix(string(d.FullName()), unknownPrefix) { + return proto.String(string(d.FullName()[len(unknownPrefix):])) + } + return proto.String("." + string(d.FullName())) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go index 32ea3d98cd2..121ba3a07bb 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go @@ -4,6 +4,10 @@ package protoreflect +import ( + "strconv" +) + // SourceLocations is a list of source locations. type SourceLocations interface { // Len reports the number of source locations in the proto file. @@ -11,9 +15,20 @@ type SourceLocations interface { // Get returns the ith SourceLocation. It panics if out of bounds. Get(int) SourceLocation - doNotImplement + // ByPath returns the SourceLocation for the given path, + // returning the first location if multiple exist for the same path. + // If multiple locations exist for the same path, + // then SourceLocation.Next index can be used to identify the + // index of the next SourceLocation. + // If no location exists for this path, it returns the zero value. + ByPath(path SourcePath) SourceLocation - // TODO: Add ByPath and ByDescriptor helper methods. + // ByDescriptor returns the SourceLocation for the given descriptor, + // returning the first location if multiple exist for the same path. + // If no location exists for this descriptor, it returns the zero value. + ByDescriptor(desc Descriptor) SourceLocation + + doNotImplement } // SourceLocation describes a source location and @@ -39,6 +54,10 @@ type SourceLocation struct { LeadingComments string // TrailingComments is the trailing attached comment for the declaration. TrailingComments string + + // Next is an index into SourceLocations for the next source location that + // has the same Path. It is zero if there is no next location. + Next int } // SourcePath identifies part of a file descriptor for a source location. @@ -48,5 +67,62 @@ type SourceLocation struct { // See google.protobuf.SourceCodeInfo.Location.path. type SourcePath []int32 -// TODO: Add SourcePath.String method to pretty-print the path. For example: -// ".message_type[6].nested_type[15].field[3]" +// Equal reports whether p1 equals p2. +func (p1 SourcePath) Equal(p2 SourcePath) bool { + if len(p1) != len(p2) { + return false + } + for i := range p1 { + if p1[i] != p2[i] { + return false + } + } + return true +} + +// String formats the path in a humanly readable manner. +// The output is guaranteed to be deterministic, +// making it suitable for use as a key into a Go map. +// It is not guaranteed to be stable as the exact output could change +// in a future version of this module. 
+// +// Example output: +// .message_type[6].nested_type[15].field[3] +func (p SourcePath) String() string { + b := p.appendFileDescriptorProto(nil) + for _, i := range p { + b = append(b, '.') + b = strconv.AppendInt(b, int64(i), 10) + } + return string(b) +} + +type appendFunc func(*SourcePath, []byte) []byte + +func (p *SourcePath) appendSingularField(b []byte, name string, f appendFunc) []byte { + if len(*p) == 0 { + return b + } + b = append(b, '.') + b = append(b, name...) + *p = (*p)[1:] + if f != nil { + b = f(p, b) + } + return b +} + +func (p *SourcePath) appendRepeatedField(b []byte, name string, f appendFunc) []byte { + b = p.appendSingularField(b, name, nil) + if len(*p) == 0 || (*p)[0] < 0 { + return b + } + b = append(b, '[') + b = strconv.AppendUint(b, uint64((*p)[0]), 10) + b = append(b, ']') + *p = (*p)[1:] + if f != nil { + b = f(p, b) + } + return b +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go new file mode 100644 index 00000000000..b03c1223c4a --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -0,0 +1,461 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package protoreflect + +func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "package", nil) + case 3: + b = p.appendRepeatedField(b, "dependency", nil) + case 10: + b = p.appendRepeatedField(b, "public_dependency", nil) + case 11: + b = p.appendRepeatedField(b, "weak_dependency", nil) + case 4: + b = p.appendRepeatedField(b, "message_type", (*SourcePath).appendDescriptorProto) + case 5: + b = p.appendRepeatedField(b, "enum_type", (*SourcePath).appendEnumDescriptorProto) + case 6: + b = p.appendRepeatedField(b, "service", (*SourcePath).appendServiceDescriptorProto) + case 7: + b = p.appendRepeatedField(b, "extension", (*SourcePath).appendFieldDescriptorProto) + case 8: + b = p.appendSingularField(b, "options", (*SourcePath).appendFileOptions) + case 9: + b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo) + case 12: + b = p.appendSingularField(b, "syntax", nil) + } + return b +} + +func (p *SourcePath) appendDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendRepeatedField(b, "field", (*SourcePath).appendFieldDescriptorProto) + case 6: + b = p.appendRepeatedField(b, "extension", (*SourcePath).appendFieldDescriptorProto) + case 3: + b = p.appendRepeatedField(b, "nested_type", (*SourcePath).appendDescriptorProto) + case 4: + b = p.appendRepeatedField(b, "enum_type", (*SourcePath).appendEnumDescriptorProto) + case 5: + b = p.appendRepeatedField(b, "extension_range", (*SourcePath).appendDescriptorProto_ExtensionRange) + case 8: + b = p.appendRepeatedField(b, "oneof_decl", (*SourcePath).appendOneofDescriptorProto) + case 7: + b = p.appendSingularField(b, "options", (*SourcePath).appendMessageOptions) + case 9: + b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendDescriptorProto_ReservedRange) + case 10: + b = p.appendRepeatedField(b, "reserved_name", nil) + } + return b +} + +func (p 
*SourcePath) appendEnumDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendRepeatedField(b, "value", (*SourcePath).appendEnumValueDescriptorProto) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendEnumOptions) + case 4: + b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendEnumDescriptorProto_EnumReservedRange) + case 5: + b = p.appendRepeatedField(b, "reserved_name", nil) + } + return b +} + +func (p *SourcePath) appendServiceDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendRepeatedField(b, "method", (*SourcePath).appendMethodDescriptorProto) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendServiceOptions) + } + return b +} + +func (p *SourcePath) appendFieldDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 3: + b = p.appendSingularField(b, "number", nil) + case 4: + b = p.appendSingularField(b, "label", nil) + case 5: + b = p.appendSingularField(b, "type", nil) + case 6: + b = p.appendSingularField(b, "type_name", nil) + case 2: + b = p.appendSingularField(b, "extendee", nil) + case 7: + b = p.appendSingularField(b, "default_value", nil) + case 9: + b = p.appendSingularField(b, "oneof_index", nil) + case 10: + b = p.appendSingularField(b, "json_name", nil) + case 8: + b = p.appendSingularField(b, "options", (*SourcePath).appendFieldOptions) + case 17: + b = p.appendSingularField(b, "proto3_optional", nil) + } + return b +} + +func (p *SourcePath) appendFileOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "java_package", nil) + case 8: + b = p.appendSingularField(b, "java_outer_classname", nil) + case 10: + b = p.appendSingularField(b, "java_multiple_files", nil) + case 20: + b = p.appendSingularField(b, "java_generate_equals_and_hash", nil) + case 27: + b = p.appendSingularField(b, "java_string_check_utf8", nil) + case 9: + b = p.appendSingularField(b, "optimize_for", nil) + case 11: + b = p.appendSingularField(b, "go_package", nil) + case 16: + b = p.appendSingularField(b, "cc_generic_services", nil) + case 17: + b = p.appendSingularField(b, "java_generic_services", nil) + case 18: + b = p.appendSingularField(b, "py_generic_services", nil) + case 42: + b = p.appendSingularField(b, "php_generic_services", nil) + case 23: + b = p.appendSingularField(b, "deprecated", nil) + case 31: + b = p.appendSingularField(b, "cc_enable_arenas", nil) + case 36: + b = p.appendSingularField(b, "objc_class_prefix", nil) + case 37: + b = p.appendSingularField(b, "csharp_namespace", nil) + case 39: + b = p.appendSingularField(b, "swift_prefix", nil) + case 40: + b = p.appendSingularField(b, "php_class_prefix", nil) + case 41: + b = p.appendSingularField(b, "php_namespace", nil) + case 44: + b = p.appendSingularField(b, "php_metadata_namespace", nil) + case 45: + b = p.appendSingularField(b, "ruby_package", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendSourceCodeInfo(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendRepeatedField(b, "location", (*SourcePath).appendSourceCodeInfo_Location) + } 
+ return b +} + +func (p *SourcePath) appendDescriptorProto_ExtensionRange(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "start", nil) + case 2: + b = p.appendSingularField(b, "end", nil) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendExtensionRangeOptions) + } + return b +} + +func (p *SourcePath) appendOneofDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "options", (*SourcePath).appendOneofOptions) + } + return b +} + +func (p *SourcePath) appendMessageOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "message_set_wire_format", nil) + case 2: + b = p.appendSingularField(b, "no_standard_descriptor_accessor", nil) + case 3: + b = p.appendSingularField(b, "deprecated", nil) + case 7: + b = p.appendSingularField(b, "map_entry", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendDescriptorProto_ReservedRange(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "start", nil) + case 2: + b = p.appendSingularField(b, "end", nil) + } + return b +} + +func (p *SourcePath) appendEnumValueDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "number", nil) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendEnumValueOptions) + } + return b +} + +func (p *SourcePath) appendEnumOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 2: + b = p.appendSingularField(b, "allow_alias", nil) + case 3: + b = p.appendSingularField(b, "deprecated", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendEnumDescriptorProto_EnumReservedRange(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "start", nil) + case 2: + b = p.appendSingularField(b, "end", nil) + } + return b +} + +func (p *SourcePath) appendMethodDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "input_type", nil) + case 3: + b = p.appendSingularField(b, "output_type", nil) + case 4: + b = p.appendSingularField(b, "options", (*SourcePath).appendMethodOptions) + case 5: + b = p.appendSingularField(b, "client_streaming", nil) + case 6: + b = p.appendSingularField(b, "server_streaming", nil) + } + return b +} + +func (p *SourcePath) appendServiceOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 33: + b = p.appendSingularField(b, "deprecated", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendFieldOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "ctype", nil) + case 2: + b = p.appendSingularField(b, "packed", nil) + case 6: + b = p.appendSingularField(b, "jstype", nil) + case 5: + b = 
p.appendSingularField(b, "lazy", nil) + case 3: + b = p.appendSingularField(b, "deprecated", nil) + case 10: + b = p.appendSingularField(b, "weak", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendUninterpretedOption(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 2: + b = p.appendRepeatedField(b, "name", (*SourcePath).appendUninterpretedOption_NamePart) + case 3: + b = p.appendSingularField(b, "identifier_value", nil) + case 4: + b = p.appendSingularField(b, "positive_int_value", nil) + case 5: + b = p.appendSingularField(b, "negative_int_value", nil) + case 6: + b = p.appendSingularField(b, "double_value", nil) + case 7: + b = p.appendSingularField(b, "string_value", nil) + case 8: + b = p.appendSingularField(b, "aggregate_value", nil) + } + return b +} + +func (p *SourcePath) appendSourceCodeInfo_Location(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendRepeatedField(b, "path", nil) + case 2: + b = p.appendRepeatedField(b, "span", nil) + case 3: + b = p.appendSingularField(b, "leading_comments", nil) + case 4: + b = p.appendSingularField(b, "trailing_comments", nil) + case 6: + b = p.appendRepeatedField(b, "leading_detached_comments", nil) + } + return b +} + +func (p *SourcePath) appendExtensionRangeOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendOneofOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "deprecated", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendMethodOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 33: + b = p.appendSingularField(b, "deprecated", nil) + case 34: + b = p.appendSingularField(b, "idempotency_level", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name_part", nil) + case 2: + b = p.appendSingularField(b, "is_extension", nil) + } + return b +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 5be14a72584..8e53c44a918 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -232,11 +232,15 @@ type MessageDescriptor interface { type isMessageDescriptor interface{ ProtoType(MessageDescriptor) } // MessageType encapsulates a MessageDescriptor with a concrete Go implementation. +// It is recommended that implementations of this interface also implement the +// MessageFieldTypes interface. type MessageType interface { // New returns a newly allocated empty message. 
+ // It may return nil for synthetic messages representing a map entry. New() Message // Zero returns an empty, read-only message. + // It may return nil for synthetic messages representing a map entry. Zero() Message // Descriptor returns the message descriptor. @@ -245,6 +249,26 @@ type MessageType interface { Descriptor() MessageDescriptor } +// MessageFieldTypes extends a MessageType by providing type information +// regarding enums and messages referenced by the message fields. +type MessageFieldTypes interface { + MessageType + + // Enum returns the EnumType for the ith field in Descriptor.Fields. + // It returns nil if the ith field is not an enum kind. + // It panics if out of bounds. + // + // Invariant: mt.Enum(i).Descriptor() == mt.Descriptor().Fields(i).Enum() + Enum(i int) EnumType + + // Message returns the MessageType for the ith field in Descriptor.Fields. + // It returns nil if the ith field is not a message or group kind. + // It panics if out of bounds. + // + // Invariant: mt.Message(i).Descriptor() == mt.Descriptor().Fields(i).Message() + Message(i int) MessageType +} + // MessageDescriptors is a list of message declarations. type MessageDescriptors interface { // Len reports the number of messages. @@ -279,8 +303,15 @@ type FieldDescriptor interface { // JSONName reports the name used for JSON serialization. // It is usually the camel-cased form of the field name. + // Extension fields are represented by the full name surrounded by brackets. JSONName() string + // TextName reports the name used for text serialization. + // It is usually the name of the field, except that groups use the name + // of the inlined message, and extension fields are represented by the + // full name surrounded by brackets. + TextName() string + // HasPresence reports whether the field distinguishes between unpopulated // and default values. HasPresence() bool @@ -371,6 +402,9 @@ type FieldDescriptors interface { // ByJSONName returns the FieldDescriptor for a field with s as the JSON name. // It returns nil if not found. ByJSONName(s string) FieldDescriptor + // ByTextName returns the FieldDescriptor for a field with s as the text name. + // It returns nil if not found. + ByTextName(s string) FieldDescriptor // ByNumber returns the FieldDescriptor for a field numbered n. // It returns nil if not found. ByNumber(n FieldNumber) FieldDescriptor diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index 5e5f9671646..66dcbcd0d21 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -17,24 +17,49 @@ package protoregistry import ( "fmt" - "log" + "os" "strings" "sync" + "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/reflect/protoreflect" ) +// conflictPolicy configures the policy for handling registration conflicts. +// +// It can be over-written at compile time with a linker-initialized variable: +// go build -ldflags "-X google.golang.org/protobuf/reflect/protoregistry.conflictPolicy=warn" +// +// It can be over-written at program execution with an environment variable: +// GOLANG_PROTOBUF_REGISTRATION_CONFLICT=warn ./main +// +// Neither of the above are covered by the compatibility promise and +// may be removed in a future release of this module. 
+var conflictPolicy = "panic" // "panic" | "warn" | "ignore" + // ignoreConflict reports whether to ignore a registration conflict // given the descriptor being registered and the error. // It is a variable so that the behavior is easily overridden in another file. var ignoreConflict = func(d protoreflect.Descriptor, err error) bool { - log.Printf(""+ - "WARNING: %v\n"+ - "A future release will panic on registration conflicts. See:\n"+ - "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict\n"+ - "\n", err) - return true + const env = "GOLANG_PROTOBUF_REGISTRATION_CONFLICT" + const faq = "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict" + policy := conflictPolicy + if v := os.Getenv(env); v != "" { + policy = v + } + switch policy { + case "panic": + panic(fmt.Sprintf("%v\nSee %v\n", err, faq)) + case "warn": + fmt.Fprintf(os.Stderr, "WARNING: %v\nSee %v\n\n", err, faq) + return true + case "ignore": + return true + default: + panic("invalid " + env + " value: " + os.Getenv(env)) + } } var globalMutex sync.RWMutex @@ -96,38 +121,7 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { } path := file.Path() if prev := r.filesByPath[path]; prev != nil { - // TODO: Remove this after some soak-in period after moving these types. - var prevPath string - const prevModule = "google.golang.org/genproto" - const prevVersion = "cb27e3aa (May 26th, 2020)" - switch path { - case "google/protobuf/field_mask.proto": - prevPath = prevModule + "/protobuf/field_mask" - case "google/protobuf/api.proto": - prevPath = prevModule + "/protobuf/api" - case "google/protobuf/type.proto": - prevPath = prevModule + "/protobuf/ptype" - case "google/protobuf/source_context.proto": - prevPath = prevModule + "/protobuf/source_context" - } - if r == GlobalFiles && prevPath != "" { - pkgName := strings.TrimSuffix(strings.TrimPrefix(path, "google/protobuf/"), ".proto") - pkgName = strings.Replace(pkgName, "_", "", -1) + "pb" - currPath := "google.golang.org/protobuf/types/known/" + pkgName - panic(fmt.Sprintf(""+ - "duplicate registration of %q\n"+ - "\n"+ - "The generated definition for this file has moved:\n"+ - "\tfrom: %q\n"+ - "\tto: %q\n"+ - "A dependency on the %q module must\n"+ - "be at version %v or higher.\n"+ - "\n"+ - "Upgrade the dependency by running:\n"+ - "\tgo get -u %v\n", - path, prevPath, currPath, prevModule, prevVersion, prevPath)) - } - + r.checkGenProtoConflict(path) err := errors.New("file %q is already registered", file.Path()) err = amendErrorWithCaller(err, prev, file) if r == GlobalFiles && ignoreConflict(file, err) { @@ -178,6 +172,47 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { return nil } +// Several well-known types were hosted in the google.golang.org/genproto module +// but were later moved to this module. To avoid a weak dependency on the +// genproto module (and its relatively large set of transitive dependencies), +// we rely on a registration conflict to determine whether the genproto version +// is too old (i.e., does not contain aliases to the new type declarations). 
+func (r *Files) checkGenProtoConflict(path string) { + if r != GlobalFiles { + return + } + var prevPath string + const prevModule = "google.golang.org/genproto" + const prevVersion = "cb27e3aa (May 26th, 2020)" + switch path { + case "google/protobuf/field_mask.proto": + prevPath = prevModule + "/protobuf/field_mask" + case "google/protobuf/api.proto": + prevPath = prevModule + "/protobuf/api" + case "google/protobuf/type.proto": + prevPath = prevModule + "/protobuf/ptype" + case "google/protobuf/source_context.proto": + prevPath = prevModule + "/protobuf/source_context" + default: + return + } + pkgName := strings.TrimSuffix(strings.TrimPrefix(path, "google/protobuf/"), ".proto") + pkgName = strings.Replace(pkgName, "_", "", -1) + "pb" // e.g., "field_mask" => "fieldmaskpb" + currPath := "google.golang.org/protobuf/types/known/" + pkgName + panic(fmt.Sprintf(""+ + "duplicate registration of %q\n"+ + "\n"+ + "The generated definition for this file has moved:\n"+ + "\tfrom: %q\n"+ + "\tto: %q\n"+ + "A dependency on the %q module must\n"+ + "be at version %v or higher.\n"+ + "\n"+ + "Upgrade the dependency by running:\n"+ + "\tgo get -u %v\n", + path, prevPath, currPath, prevModule, prevVersion, prevPath)) +} + // FindDescriptorByName looks up a descriptor by the full name. // // This returns (nil, NotFound) if not found. @@ -560,13 +595,25 @@ func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumTyp return nil, NotFound } -// FindMessageByName looks up a message by its full name. -// E.g., "google.protobuf.Any" +// FindMessageByName looks up a message by its full name, +// e.g. "google.protobuf.Any". // -// This return (nil, NotFound) if not found. +// This returns (nil, NotFound) if not found. func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { - // The full name by itself is a valid URL. - return r.FindMessageByURL(string(message)) + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if v := r.typesByName[message]; v != nil { + if mt, _ := v.(protoreflect.MessageType); mt != nil { + return mt, nil + } + return nil, errors.New("found wrong type: got %v, want message", typeName(v)) + } + return nil, NotFound } // FindMessageByURL looks up a message by a URL identifier. @@ -574,6 +621,8 @@ func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.M // // This returns (nil, NotFound) if not found. func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { + // This function is similar to FindMessageByName but + // truncates anything before and including '/' in the URL. if r == nil { return nil, NotFound } @@ -613,6 +662,26 @@ func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.E if xt, _ := v.(protoreflect.ExtensionType); xt != nil { return xt, nil } + + // MessageSet extensions are special in that the name of the extension + // is the name of the message type used to extend the MessageSet. + // This naming scheme is used by text and JSON serialization. + // + // This feature is protected by the ProtoLegacy flag since MessageSets + // are a proto1 feature that is long deprecated. 
+ if flags.ProtoLegacy { + if _, ok := v.(protoreflect.MessageType); ok { + field := field.Append(messageset.ExtensionName) + if v := r.typesByName[field]; v != nil { + if xt, _ := v.(protoreflect.ExtensionType); xt != nil { + if messageset.IsMessageSetExtension(xt.TypeDescriptor()) { + return xt, nil + } + } + } + } + } + return nil, errors.New("found wrong type: got %v, want extension", typeName(v)) } return nil, NotFound diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go new file mode 100644 index 00000000000..f77239fc3b0 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -0,0 +1,4039 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/descriptor.proto + +package descriptorpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoiface "google.golang.org/protobuf/runtime/protoiface" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. 
+ FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // Length-delimited aggregate. + // New in version 2. + FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 // Uses ZigZag encoding. + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 // Uses ZigZag encoding. +) + +// Enum value maps for FieldDescriptorProto_Type. +var ( + FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", + } + FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, + } +) + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} + +func (x FieldDescriptorProto_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() +} + +func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[0] +} + +func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldDescriptorProto_Type) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(num) + return nil +} + +// Deprecated: Use FieldDescriptorProto_Type.Descriptor instead. 
+func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +// Enum value maps for FieldDescriptorProto_Label. +var ( + FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", + } + FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, + } +) + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} + +func (x FieldDescriptorProto_Label) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() +} + +func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[1] +} + +func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldDescriptorProto_Label) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(num) + return nil +} + +// Deprecated: Use FieldDescriptorProto_Label.Descriptor instead. +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 // Generate complete code for parsing, serialization, + // etc. + FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 // Use ReflectionOps to implement these methods. + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 // Generate code using MessageLite and the lite runtime. +) + +// Enum value maps for FileOptions_OptimizeMode. +var ( + FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", + } + FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, + } +) + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} + +func (x FileOptions_OptimizeMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() +} + +func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[2] +} + +func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FileOptions_OptimizeMode) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(num) + return nil +} + +// Deprecated: Use FileOptions_OptimizeMode.Descriptor instead. +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +// Enum value maps for FieldOptions_CType. +var ( + FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", + } + FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, + } +) + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} + +func (x FieldOptions_CType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() +} + +func (FieldOptions_CType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[3] +} + +func (x FieldOptions_CType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldOptions_CType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_CType(num) + return nil +} + +// Deprecated: Use FieldOptions_CType.Descriptor instead. +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. + FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +// Enum value maps for FieldOptions_JSType. +var ( + FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", + } + FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, + } +) + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() +} + +func (FieldOptions_JSType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[4] +} + +func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldOptions_JSType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_JSType(num) + return nil +} + +// Deprecated: Use FieldOptions_JSType.Descriptor instead. 
+func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. +type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 // implies idempotent + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 // idempotent, but may have side effects +) + +// Enum value maps for MethodOptions_IdempotencyLevel. +var ( + MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", + } + MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, + } +) + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() +} + +func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[5] +} + +func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(num) + return nil +} + +// Deprecated: Use MethodOptions_IdempotencyLevel.Descriptor instead. +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +type FileDescriptorSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` +} + +func (x *FileDescriptorSet) Reset() { + *x = FileDescriptorSet{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorSet) ProtoMessage() {} + +func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorSet.ProtoReflect.Descriptor instead. 
+func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0} +} + +func (x *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if x != nil { + return x.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // file name, relative to root of source tree + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc. + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` +} + +func (x *FileDescriptorProto) Reset() { + *x = FileDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorProto) ProtoMessage() {} + +func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorProto.ProtoReflect.Descriptor instead. 
+func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{1} +} + +func (x *FileDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *FileDescriptorProto) GetPackage() string { + if x != nil && x.Package != nil { + return *x.Package + } + return "" +} + +func (x *FileDescriptorProto) GetDependency() []string { + if x != nil { + return x.Dependency + } + return nil +} + +func (x *FileDescriptorProto) GetPublicDependency() []int32 { + if x != nil { + return x.PublicDependency + } + return nil +} + +func (x *FileDescriptorProto) GetWeakDependency() []int32 { + if x != nil { + return x.WeakDependency + } + return nil +} + +func (x *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if x != nil { + return x.MessageType + } + return nil +} + +func (x *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if x != nil { + return x.EnumType + } + return nil +} + +func (x *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if x != nil { + return x.Service + } + return nil +} + +func (x *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if x != nil { + return x.Extension + } + return nil +} + +func (x *FileDescriptorProto) GetOptions() *FileOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if x != nil { + return x.SourceCodeInfo + } + return nil +} + +func (x *FileDescriptorProto) GetSyntax() string { + if x != nil && x.Syntax != nil { + return *x.Syntax + } + return "" +} + +// Describes a message type. +type DescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. 
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` +} + +func (x *DescriptorProto) Reset() { + *x = DescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescriptorProto) ProtoMessage() {} + +func (x *DescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescriptorProto.ProtoReflect.Descriptor instead. +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2} +} + +func (x *DescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *DescriptorProto) GetField() []*FieldDescriptorProto { + if x != nil { + return x.Field + } + return nil +} + +func (x *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if x != nil { + return x.Extension + } + return nil +} + +func (x *DescriptorProto) GetNestedType() []*DescriptorProto { + if x != nil { + return x.NestedType + } + return nil +} + +func (x *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if x != nil { + return x.EnumType + } + return nil +} + +func (x *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if x != nil { + return x.ExtensionRange + } + return nil +} + +func (x *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if x != nil { + return x.OneofDecl + } + return nil +} + +func (x *DescriptorProto) GetOptions() *MessageOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if x != nil { + return x.ReservedRange + } + return nil +} + +func (x *DescriptorProto) GetReservedName() []string { + if x != nil { + return x.ReservedName + } + return nil +} + +type ExtensionRangeOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +func (x *ExtensionRangeOptions) Reset() { + *x = ExtensionRangeOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRangeOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRangeOptions) ProtoMessage() {} + +func (x *ExtensionRangeOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRangeOptions.ProtoReflect.Descriptor instead. 
+func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3} +} + +var extRange_ExtensionRangeOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use ExtensionRangeOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*ExtensionRangeOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_ExtensionRangeOptions +} + +func (x *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // If true, this is a proto3 "optional". When a proto3 field is optional, it + // tracks presence regardless of field type. + // + // When proto3_optional is true, this field must be belong to a oneof to + // signal to old proto3 clients that presence is tracked for this field. This + // oneof is known as a "synthetic" oneof, and this field must be its sole + // member (each proto3 optional field gets its own synthetic oneof). 
Synthetic + // oneofs exist in the descriptor only, and do not generate any API. Synthetic + // oneofs must be ordered after all "real" oneofs. + // + // For message fields, proto3_optional doesn't create any semantic change, + // since non-repeated message fields always track presence. However it still + // indicates the semantic detail of whether the user wrote "optional" or not. + // This can be useful for round-tripping the .proto file. For consistency we + // give message fields a synthetic oneof also, even though it is not required + // to track presence. This is especially important because the parser can't + // tell if a field is a message or an enum, so it must always create a + // synthetic oneof. + // + // Proto2 optional fields do not set this flag, because they already indicate + // optional with `LABEL_OPTIONAL`. + Proto3Optional *bool `protobuf:"varint,17,opt,name=proto3_optional,json=proto3Optional" json:"proto3_optional,omitempty"` +} + +func (x *FieldDescriptorProto) Reset() { + *x = FieldDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldDescriptorProto) ProtoMessage() {} + +func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldDescriptorProto.ProtoReflect.Descriptor instead. +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4} +} + +func (x *FieldDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *FieldDescriptorProto) GetNumber() int32 { + if x != nil && x.Number != nil { + return *x.Number + } + return 0 +} + +func (x *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if x != nil && x.Label != nil { + return *x.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (x *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if x != nil && x.Type != nil { + return *x.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (x *FieldDescriptorProto) GetTypeName() string { + if x != nil && x.TypeName != nil { + return *x.TypeName + } + return "" +} + +func (x *FieldDescriptorProto) GetExtendee() string { + if x != nil && x.Extendee != nil { + return *x.Extendee + } + return "" +} + +func (x *FieldDescriptorProto) GetDefaultValue() string { + if x != nil && x.DefaultValue != nil { + return *x.DefaultValue + } + return "" +} + +func (x *FieldDescriptorProto) GetOneofIndex() int32 { + if x != nil && x.OneofIndex != nil { + return *x.OneofIndex + } + return 0 +} + +func (x *FieldDescriptorProto) GetJsonName() string { + if x != nil && x.JsonName != nil { + return *x.JsonName + } + return "" +} + +func (x *FieldDescriptorProto) GetOptions() *FieldOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *FieldDescriptorProto) GetProto3Optional() bool { + if x != nil && x.Proto3Optional != nil { + return *x.Proto3Optional + } + return false +} + +// Describes a oneof. 
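+// A short illustrative sketch (not part of the generated code) of how the
+// getters above behave for presence-related fields, assuming fd is a
+// *FieldDescriptorProto taken from a parsed descriptor:
+//
+//	if fd.GetProto3Optional() {
+//		// the field tracks presence via a synthetic oneof, so
+//		// fd.GetOneofIndex() points at that synthetic oneof entry
+//		_ = fd.GetOneofIndex()
+//	}
+//	// getters never panic on nil: (*FieldDescriptorProto)(nil).GetName() == ""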
+type OneofDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` +} + +func (x *OneofDescriptorProto) Reset() { + *x = OneofDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OneofDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OneofDescriptorProto) ProtoMessage() {} + +func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OneofDescriptorProto.ProtoReflect.Descriptor instead. +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{5} +} + +func (x *OneofDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *OneofDescriptorProto) GetOptions() *OneofOptions { + if x != nil { + return x.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` +} + +func (x *EnumDescriptorProto) Reset() { + *x = EnumDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumDescriptorProto) ProtoMessage() {} + +func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumDescriptorProto.ProtoReflect.Descriptor instead. 
+func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{6} +} + +func (x *EnumDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if x != nil { + return x.Value + } + return nil +} + +func (x *EnumDescriptorProto) GetOptions() *EnumOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if x != nil { + return x.ReservedRange + } + return nil +} + +func (x *EnumDescriptorProto) GetReservedName() []string { + if x != nil { + return x.ReservedName + } + return nil +} + +// Describes a value within an enum. +type EnumValueDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` +} + +func (x *EnumValueDescriptorProto) Reset() { + *x = EnumValueDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumValueDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumValueDescriptorProto) ProtoMessage() {} + +func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumValueDescriptorProto.ProtoReflect.Descriptor instead. +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{7} +} + +func (x *EnumValueDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *EnumValueDescriptorProto) GetNumber() int32 { + if x != nil && x.Number != nil { + return *x.Number + } + return 0 +} + +func (x *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if x != nil { + return x.Options + } + return nil +} + +// Describes a service. 
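+// Because the descriptor fields above are pointers (proto2 optional semantics),
+// hand-building one typically goes through the proto helper constructors. A
+// minimal sketch, assuming google.golang.org/protobuf/proto is imported as
+// proto; it is illustrative and not part of the generated code:
+//
+//	enum := &EnumDescriptorProto{
+//		Name: proto.String("Color"),
+//		Value: []*EnumValueDescriptorProto{
+//			{Name: proto.String("COLOR_UNSPECIFIED"), Number: proto.Int32(0)},
+//			{Name: proto.String("COLOR_RED"), Number: proto.Int32(1)},
+//		},
+//	}
+//	_ = enum.GetValue()[0].GetNumber() // 0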
+type ServiceDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` +} + +func (x *ServiceDescriptorProto) Reset() { + *x = ServiceDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceDescriptorProto) ProtoMessage() {} + +func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceDescriptorProto.ProtoReflect.Descriptor instead. +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{8} +} + +func (x *ServiceDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if x != nil { + return x.Method + } + return nil +} + +func (x *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if x != nil { + return x.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` +} + +// Default values for MethodDescriptorProto fields. 
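+// The def=0 tags on client_streaming and server_streaming above mean the
+// generated getters report false when the field is unset. A small illustrative
+// sketch (not part of the generated code), assuming m is a *MethodDescriptorProto:
+//
+//	switch {
+//	case m.GetClientStreaming() && m.GetServerStreaming():
+//		// bidirectional streaming RPC
+//	case m.GetServerStreaming():
+//		// server streaming RPC
+//	case m.GetClientStreaming():
+//		// client streaming RPC
+//	default:
+//		// unary RPC
+//	}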
+const ( + Default_MethodDescriptorProto_ClientStreaming = bool(false) + Default_MethodDescriptorProto_ServerStreaming = bool(false) +) + +func (x *MethodDescriptorProto) Reset() { + *x = MethodDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodDescriptorProto) ProtoMessage() {} + +func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodDescriptorProto.ProtoReflect.Descriptor instead. +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{9} +} + +func (x *MethodDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *MethodDescriptorProto) GetInputType() string { + if x != nil && x.InputType != nil { + return *x.InputType + } + return "" +} + +func (x *MethodDescriptorProto) GetOutputType() string { + if x != nil && x.OutputType != nil { + return *x.OutputType + } + return "" +} + +func (x *MethodDescriptorProto) GetOptions() *MethodOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *MethodDescriptorProto) GetClientStreaming() bool { + if x != nil && x.ClientStreaming != nil { + return *x.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (x *MethodDescriptorProto) GetServerStreaming() bool { + if x != nil && x.ServerStreaming != nil { + return *x.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. 
+ JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + // + // Deprecated: Do not use. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=1" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. 
+ ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. + PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for FileOptions fields. +const ( + Default_FileOptions_JavaMultipleFiles = bool(false) + Default_FileOptions_JavaStringCheckUtf8 = bool(false) + Default_FileOptions_OptimizeFor = FileOptions_SPEED + Default_FileOptions_CcGenericServices = bool(false) + Default_FileOptions_JavaGenericServices = bool(false) + Default_FileOptions_PyGenericServices = bool(false) + Default_FileOptions_PhpGenericServices = bool(false) + Default_FileOptions_Deprecated = bool(false) + Default_FileOptions_CcEnableArenas = bool(true) +) + +func (x *FileOptions) Reset() { + *x = FileOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileOptions) ProtoMessage() {} + +func (x *FileOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileOptions.ProtoReflect.Descriptor instead. 
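+// A brief illustrative sketch (not part of the generated code) of reading the
+// language-mapping options above from a parsed file, assuming fdp is a
+// *FileDescriptorProto: the chained getters are nil-safe, so no check is
+// needed when the file declares no options at all.
+//
+//	goPkg := fdp.GetOptions().GetGoPackage()         // "" when options or go_package are unset
+//	multi := fdp.GetOptions().GetJavaMultipleFiles() // false by default (def=0 above)
+//	_, _ = goPkg, multi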
+func (*FileOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{10} +} + +var extRange_FileOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use FileOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*FileOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_FileOptions +} + +func (x *FileOptions) GetJavaPackage() string { + if x != nil && x.JavaPackage != nil { + return *x.JavaPackage + } + return "" +} + +func (x *FileOptions) GetJavaOuterClassname() string { + if x != nil && x.JavaOuterClassname != nil { + return *x.JavaOuterClassname + } + return "" +} + +func (x *FileOptions) GetJavaMultipleFiles() bool { + if x != nil && x.JavaMultipleFiles != nil { + return *x.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. +func (x *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if x != nil && x.JavaGenerateEqualsAndHash != nil { + return *x.JavaGenerateEqualsAndHash + } + return false +} + +func (x *FileOptions) GetJavaStringCheckUtf8() bool { + if x != nil && x.JavaStringCheckUtf8 != nil { + return *x.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (x *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if x != nil && x.OptimizeFor != nil { + return *x.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (x *FileOptions) GetGoPackage() string { + if x != nil && x.GoPackage != nil { + return *x.GoPackage + } + return "" +} + +func (x *FileOptions) GetCcGenericServices() bool { + if x != nil && x.CcGenericServices != nil { + return *x.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (x *FileOptions) GetJavaGenericServices() bool { + if x != nil && x.JavaGenericServices != nil { + return *x.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (x *FileOptions) GetPyGenericServices() bool { + if x != nil && x.PyGenericServices != nil { + return *x.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (x *FileOptions) GetPhpGenericServices() bool { + if x != nil && x.PhpGenericServices != nil { + return *x.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (x *FileOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (x *FileOptions) GetCcEnableArenas() bool { + if x != nil && x.CcEnableArenas != nil { + return *x.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (x *FileOptions) GetObjcClassPrefix() string { + if x != nil && x.ObjcClassPrefix != nil { + return *x.ObjcClassPrefix + } + return "" +} + +func (x *FileOptions) GetCsharpNamespace() string { + if x != nil && x.CsharpNamespace != nil { + return *x.CsharpNamespace + } + return "" +} + +func (x *FileOptions) GetSwiftPrefix() string { + if x != nil && x.SwiftPrefix != nil { + return *x.SwiftPrefix + } + return "" +} + +func (x *FileOptions) GetPhpClassPrefix() string { + if x != nil && x.PhpClassPrefix != nil { + return *x.PhpClassPrefix + } + return "" +} + +func (x *FileOptions) GetPhpNamespace() string { + if x != nil && x.PhpNamespace != nil { + return *x.PhpNamespace + } + return "" +} + +func (x *FileOptions) GetPhpMetadataNamespace() string { + if x != nil && x.PhpMetadataNamespace != nil { + return *x.PhpMetadataNamespace + } + return "" +} + +func 
(x *FileOptions) GetRubyPackage() string { + if x != nil && x.RubyPackage != nil { + return *x.RubyPackage + } + return "" +} + +func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for MessageOptions fields. 
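+// As the map_entry comment above notes, map fields are backed by an implicit
+// MapFieldEntry message. A minimal illustrative sketch (not part of the
+// generated code) of skipping those synthetic entries when listing the
+// messages declared in a DescriptorProto m:
+//
+//	for _, nested := range m.GetNestedType() {
+//		if nested.GetOptions().GetMapEntry() {
+//			continue // generated map entry, not a user-declared message
+//		}
+//		_ = nested.GetName()
+//	}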
+const ( + Default_MessageOptions_MessageSetWireFormat = bool(false) + Default_MessageOptions_NoStandardDescriptorAccessor = bool(false) + Default_MessageOptions_Deprecated = bool(false) +) + +func (x *MessageOptions) Reset() { + *x = MessageOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MessageOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageOptions) ProtoMessage() {} + +func (x *MessageOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageOptions.ProtoReflect.Descriptor instead. +func (*MessageOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{11} +} + +var extRange_MessageOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use MessageOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*MessageOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_MessageOptions +} + +func (x *MessageOptions) GetMessageSetWireFormat() bool { + if x != nil && x.MessageSetWireFormat != nil { + return *x.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (x *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if x != nil && x.NoStandardDescriptorAccessor != nil { + return *x.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (x *MessageOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (x *MessageOptions) GetMapEntry() bool { + if x != nil && x.MapEntry != nil { + return *x.MapEntry + } + return false +} + +func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). 
A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for FieldOptions fields. 
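+// A short illustrative sketch (not part of the generated code) of inspecting
+// the per-field options described above, assuming f is a *FieldDescriptorProto:
+//
+//	opts := f.GetOptions()
+//	if opts.GetDeprecated() {
+//		// surface a warning for code still referencing this field
+//	}
+//	if opts.GetLazy() {
+//		// the author hinted that this message-typed field may be parsed lazily
+//	}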
+const ( + Default_FieldOptions_Ctype = FieldOptions_STRING + Default_FieldOptions_Jstype = FieldOptions_JS_NORMAL + Default_FieldOptions_Lazy = bool(false) + Default_FieldOptions_Deprecated = bool(false) + Default_FieldOptions_Weak = bool(false) +) + +func (x *FieldOptions) Reset() { + *x = FieldOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldOptions) ProtoMessage() {} + +func (x *FieldOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldOptions.ProtoReflect.Descriptor instead. +func (*FieldOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12} +} + +var extRange_FieldOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use FieldOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*FieldOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_FieldOptions +} + +func (x *FieldOptions) GetCtype() FieldOptions_CType { + if x != nil && x.Ctype != nil { + return *x.Ctype + } + return Default_FieldOptions_Ctype +} + +func (x *FieldOptions) GetPacked() bool { + if x != nil && x.Packed != nil { + return *x.Packed + } + return false +} + +func (x *FieldOptions) GetJstype() FieldOptions_JSType { + if x != nil && x.Jstype != nil { + return *x.Jstype + } + return Default_FieldOptions_Jstype +} + +func (x *FieldOptions) GetLazy() bool { + if x != nil && x.Lazy != nil { + return *x.Lazy + } + return Default_FieldOptions_Lazy +} + +func (x *FieldOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (x *FieldOptions) GetWeak() bool { + if x != nil && x.Weak != nil { + return *x.Weak + } + return Default_FieldOptions_Weak +} + +func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +func (x *OneofOptions) Reset() { + *x = OneofOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OneofOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OneofOptions) ProtoMessage() {} + +func (x *OneofOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OneofOptions.ProtoReflect.Descriptor instead. +func (*OneofOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13} +} + +var extRange_OneofOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use OneofOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*OneofOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_OneofOptions +} + +func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for EnumOptions fields. +const ( + Default_EnumOptions_Deprecated = bool(false) +) + +func (x *EnumOptions) Reset() { + *x = EnumOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumOptions) ProtoMessage() {} + +func (x *EnumOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumOptions.ProtoReflect.Descriptor instead. 
+func (*EnumOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{14} +} + +var extRange_EnumOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use EnumOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*EnumOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_EnumOptions +} + +func (x *EnumOptions) GetAllowAlias() bool { + if x != nil && x.AllowAlias != nil { + return *x.AllowAlias + } + return false +} + +func (x *EnumOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for EnumValueOptions fields. +const ( + Default_EnumValueOptions_Deprecated = bool(false) +) + +func (x *EnumValueOptions) Reset() { + *x = EnumValueOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumValueOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumValueOptions) ProtoMessage() {} + +func (x *EnumValueOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumValueOptions.ProtoReflect.Descriptor instead. +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{15} +} + +var extRange_EnumValueOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use EnumValueOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*EnumValueOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_EnumValueOptions +} + +func (x *EnumValueOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Is this service deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for ServiceOptions fields. +const ( + Default_ServiceOptions_Deprecated = bool(false) +) + +func (x *ServiceOptions) Reset() { + *x = ServiceOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceOptions) ProtoMessage() {} + +func (x *ServiceOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceOptions.ProtoReflect.Descriptor instead. +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16} +} + +var extRange_ServiceOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use ServiceOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*ServiceOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_ServiceOptions +} + +func (x *ServiceOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (x *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for MethodOptions fields. 
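+// The idempotency_level field above is typically consulted by transcoding or
+// retry layers. A minimal illustrative sketch (not part of the generated
+// code), assuming m is a *MethodDescriptorProto:
+//
+//	if m.GetOptions().GetIdempotencyLevel() != MethodOptions_IDEMPOTENCY_UNKNOWN {
+//		// the method declared NO_SIDE_EFFECTS or IDEMPOTENT, so callers may
+//		// choose to retry it or map it onto an idempotent HTTP verb
+//	}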
+const ( + Default_MethodOptions_Deprecated = bool(false) + Default_MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN +) + +func (x *MethodOptions) Reset() { + *x = MethodOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodOptions) ProtoMessage() {} + +func (x *MethodOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodOptions.ProtoReflect.Descriptor instead. +func (*MethodOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17} +} + +var extRange_MethodOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use MethodOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*MethodOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_MethodOptions +} + +func (x *MethodOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (x *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if x != nil && x.IdempotencyLevel != nil { + return *x.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. 
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` +} + +func (x *UninterpretedOption) Reset() { + *x = UninterpretedOption{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UninterpretedOption) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UninterpretedOption) ProtoMessage() {} + +func (x *UninterpretedOption) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UninterpretedOption.ProtoReflect.Descriptor instead. +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{18} +} + +func (x *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if x != nil { + return x.Name + } + return nil +} + +func (x *UninterpretedOption) GetIdentifierValue() string { + if x != nil && x.IdentifierValue != nil { + return *x.IdentifierValue + } + return "" +} + +func (x *UninterpretedOption) GetPositiveIntValue() uint64 { + if x != nil && x.PositiveIntValue != nil { + return *x.PositiveIntValue + } + return 0 +} + +func (x *UninterpretedOption) GetNegativeIntValue() int64 { + if x != nil && x.NegativeIntValue != nil { + return *x.NegativeIntValue + } + return 0 +} + +func (x *UninterpretedOption) GetDoubleValue() float64 { + if x != nil && x.DoubleValue != nil { + return *x.DoubleValue + } + return 0 +} + +func (x *UninterpretedOption) GetStringValue() []byte { + if x != nil { + return x.StringValue + } + return nil +} + +func (x *UninterpretedOption) GetAggregateValue() string { + if x != nil && x.AggregateValue != nil { + return *x.AggregateValue + } + return "" +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. 
+ // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` +} + +func (x *SourceCodeInfo) Reset() { + *x = SourceCodeInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceCodeInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceCodeInfo) ProtoMessage() {} + +func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceCodeInfo.ProtoReflect.Descriptor instead. +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} +} + +func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if x != nil { + return x.Location + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. 
+ Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` +} + +func (x *GeneratedCodeInfo) Reset() { + *x = GeneratedCodeInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GeneratedCodeInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GeneratedCodeInfo) ProtoMessage() {} + +func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GeneratedCodeInfo.ProtoReflect.Descriptor instead. +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} +} + +func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if x != nil { + return x.Annotation + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` +} + +func (x *DescriptorProto_ExtensionRange) Reset() { + *x = DescriptorProto_ExtensionRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DescriptorProto_ExtensionRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} + +func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescriptorProto_ExtensionRange.ProtoReflect.Descriptor instead. +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *DescriptorProto_ExtensionRange) GetStart() int32 { + if x != nil && x.Start != nil { + return *x.Start + } + return 0 +} + +func (x *DescriptorProto_ExtensionRange) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +func (x *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if x != nil { + return x.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. +type DescriptorProto_ReservedRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. 
+} + +func (x *DescriptorProto_ReservedRange) Reset() { + *x = DescriptorProto_ReservedRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DescriptorProto_ReservedRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescriptorProto_ReservedRange) ProtoMessage() {} + +func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescriptorProto_ReservedRange.ProtoReflect.Descriptor instead. +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2, 1} +} + +func (x *DescriptorProto_ReservedRange) GetStart() int32 { + if x != nil && x.Start != nil { + return *x.Start + } + return 0 +} + +func (x *DescriptorProto_ReservedRange) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. +type EnumDescriptorProto_EnumReservedRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Inclusive. +} + +func (x *EnumDescriptorProto_EnumReservedRange) Reset() { + *x = EnumDescriptorProto_EnumReservedRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumDescriptorProto_EnumReservedRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} + +func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumDescriptorProto_EnumReservedRange.ProtoReflect.Descriptor instead. +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if x != nil && x.Start != nil { + return *x.Start + } + return 0 +} + +func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). 
+// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". +type UninterpretedOption_NamePart struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` +} + +func (x *UninterpretedOption_NamePart) Reset() { + *x = UninterpretedOption_NamePart{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UninterpretedOption_NamePart) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UninterpretedOption_NamePart) ProtoMessage() {} + +func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UninterpretedOption_NamePart.ProtoReflect.Descriptor instead. +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{18, 0} +} + +func (x *UninterpretedOption_NamePart) GetNamePart() string { + if x != nil && x.NamePart != nil { + return *x.NamePart + } + return "" +} + +func (x *UninterpretedOption_NamePart) GetIsExtension() bool { + if x != nil && x.IsExtension != nil { + return *x.IsExtension + } + return false +} + +type SourceCodeInfo_Location struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. 
+ // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` +} + +func (x *SourceCodeInfo_Location) Reset() { + *x = SourceCodeInfo_Location{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceCodeInfo_Location) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceCodeInfo_Location) ProtoMessage() {} + +func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceCodeInfo_Location.ProtoReflect.Descriptor instead. 
+func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} +} + +func (x *SourceCodeInfo_Location) GetPath() []int32 { + if x != nil { + return x.Path + } + return nil +} + +func (x *SourceCodeInfo_Location) GetSpan() []int32 { + if x != nil { + return x.Span + } + return nil +} + +func (x *SourceCodeInfo_Location) GetLeadingComments() string { + if x != nil && x.LeadingComments != nil { + return *x.LeadingComments + } + return "" +} + +func (x *SourceCodeInfo_Location) GetTrailingComments() string { + if x != nil && x.TrailingComments != nil { + return *x.TrailingComments + } + return "" +} + +func (x *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if x != nil { + return x.LeadingDetachedComments + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` +} + +func (x *GeneratedCodeInfo_Annotation) Reset() { + *x = GeneratedCodeInfo_Annotation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GeneratedCodeInfo_Annotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} + +func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GeneratedCodeInfo_Annotation.ProtoReflect.Descriptor instead. 
+func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} +} + +func (x *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if x != nil { + return x.Path + } + return nil +} + +func (x *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if x != nil && x.SourceFile != nil { + return *x.SourceFile + } + return "" +} + +func (x *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if x != nil && x.Begin != nil { + return *x.Begin + } + return 0 +} + +func (x *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor + +var file_google_protobuf_descriptor_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x22, 0x4d, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, + 0x6c, 0x65, 0x22, 0xe4, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, + 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, + 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, + 0x03, 0x28, 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, + 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, + 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, + 0x77, 0x65, 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, + 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, + 0x75, 
0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, + 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, + 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, + 0x44, 0x65, 0x73, 0x63, 0x72, 
0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, + 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, + 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, + 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, + 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, + 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x7c, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, + 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 
0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, + 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, + 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, + 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, + 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 
0x4c, 0x4f, 0x41, + 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, + 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, + 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, + 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, + 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, + 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, + 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, + 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, + 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, + 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, + 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, + 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, + 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, + 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, + 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, + 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, + 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, + 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x69, 
0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, + 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, + 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, + 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, + 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, + 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, + 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, + 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, + 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, + 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, + 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, + 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 
0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, + 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, + 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, + 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, + 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, + 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, + 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, + 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, + 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 
0x2c, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, + 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, + 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, + 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, + 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, + 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, + 0x10, 0x27, 0x22, 0xd1, 0x02, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, + 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, + 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, + 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, + 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, + 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xe2, 0x03, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, + 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, + 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, + 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, + 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, + 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, + 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, + 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, + 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, + 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, + 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, + 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, + 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, + 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, + 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, + 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 
0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, + 0x22, 0xc0, 0x01, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, + 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, + 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, + 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, + 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 
0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, + 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, + 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, + 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, + 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, + 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, + 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, + 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, + 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, + 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, + 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, + 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, + 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, + 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 
0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, + 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, + 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, + 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, + 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, + 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, + 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, + 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, + 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, + 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, + 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, + 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, + 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd1, 0x01, + 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, + 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, + 0x63, 
0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, + 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, + 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, + 0x64, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, + 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, +} + +var ( + file_google_protobuf_descriptor_proto_rawDescOnce sync.Once + file_google_protobuf_descriptor_proto_rawDescData = file_google_protobuf_descriptor_proto_rawDesc +) + +func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { + file_google_protobuf_descriptor_proto_rawDescOnce.Do(func() { + file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_descriptor_proto_rawDescData) + }) + return file_google_protobuf_descriptor_proto_rawDescData +} + +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 6) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 27) +var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ + (FieldDescriptorProto_Type)(0), // 0: google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 1: google.protobuf.FieldDescriptorProto.Label + (FileOptions_OptimizeMode)(0), // 2: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 3: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 4: google.protobuf.FieldOptions.JSType + (MethodOptions_IdempotencyLevel)(0), // 5: google.protobuf.MethodOptions.IdempotencyLevel + (*FileDescriptorSet)(nil), // 6: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 7: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 8: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 9: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 10: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 11: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 12: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 13: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 14: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 15: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 16: google.protobuf.FileOptions + (*MessageOptions)(nil), // 17: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 18: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 19: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 20: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 21: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 22: 
google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 23: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 24: google.protobuf.UninterpretedOption + (*SourceCodeInfo)(nil), // 25: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 26: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 27: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 28: google.protobuf.DescriptorProto.ReservedRange + (*EnumDescriptorProto_EnumReservedRange)(nil), // 29: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*UninterpretedOption_NamePart)(nil), // 30: google.protobuf.UninterpretedOption.NamePart + (*SourceCodeInfo_Location)(nil), // 31: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 32: google.protobuf.GeneratedCodeInfo.Annotation +} +var file_google_protobuf_descriptor_proto_depIdxs = []int32{ + 7, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 8, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 12, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 14, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 10, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 16, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 25, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 10, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 10, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 8, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 12, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 27, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 11, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 17, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 28, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 24, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 1, // 16: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 0, // 17: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 18, // 18: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 19, // 19: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 13, // 20: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 20, // 21: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 29, // 22: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 21, // 23: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 15, // 24: 
google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 22, // 25: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 23, // 26: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 2, // 27: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 24, // 28: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 29: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 3, // 30: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 4, // 31: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 24, // 32: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 33: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 34: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 35: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 36: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 5, // 37: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 24, // 38: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 30, // 39: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 31, // 40: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 32, // 41: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 9, // 42: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 43, // [43:43] is the sub-list for method output_type + 43, // [43:43] is the sub-list for method input_type + 43, // [43:43] is the sub-list for extension type_name + 43, // [43:43] is the sub-list for extension extendee + 0, // [0:43] is the sub-list for field type_name +} + +func init() { file_google_protobuf_descriptor_proto_init() } +func file_google_protobuf_descriptor_proto_init() { + if File_google_protobuf_descriptor_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionRangeOptions); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OneofDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumValueDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MessageOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OneofOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumValueOptions); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UninterpretedOption); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceCodeInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GeneratedCodeInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DescriptorProto_ExtensionRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DescriptorProto_ReservedRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UninterpretedOption_NamePart); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceCodeInfo_Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GeneratedCodeInfo_Annotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, + NumEnums: 6, + NumMessages: 27, + NumExtensions: 0, + NumServices: 0, 
+ }, + GoTypes: file_google_protobuf_descriptor_proto_goTypes, + DependencyIndexes: file_google_protobuf_descriptor_proto_depIdxs, + EnumInfos: file_google_protobuf_descriptor_proto_enumTypes, + MessageInfos: file_google_protobuf_descriptor_proto_msgTypes, + }.Build() + File_google_protobuf_descriptor_proto = out.File + file_google_protobuf_descriptor_proto_rawDesc = nil + file_google_protobuf_descriptor_proto_goTypes = nil + file_google_protobuf_descriptor_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 82a473e2652..8c10797b905 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -166,10 +166,13 @@ import ( // Example 4: Pack and unpack a message in Go // // foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) +// any, err := anypb.New(foo) +// if err != nil { +// ... +// } // ... // foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// if err := any.UnmarshalTo(foo); err != nil { // ... // } // @@ -420,14 +423,15 @@ var file_google_protobuf_any_proto_rawDesc = []byte{ 0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x6f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x25, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0xa2, 0x02, - 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, - 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, + 0x61, 0x6e, 0x79, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, + 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index f7a11099404..a583ca2f6c7 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -303,16 +303,16 @@ var file_google_protobuf_duration_proto_rawDesc = []byte{ 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 
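// Illustrative sketch, not part of the vendored diff: the updated any.pb.go
// comment above swaps the deprecated ptypes.MarshalAny/UnmarshalAny helpers
// for anypb.New and (*Any).UnmarshalTo. Using durationpb.Duration as a
// stand-in for an arbitrary message (pb.Foo in the vendored comment):
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	src := durationpb.New(3 * time.Second)

	// Pack: anypb.New marshals src and records its type URL.
	wrapped, err := anypb.New(src)
	if err != nil {
		panic(err)
	}

	// Unpack: UnmarshalTo fails if the Any's type URL does not match dst.
	dst := &durationpb.Duration{}
	if err := wrapped.UnmarshalTo(dst); err != nil {
		panic(err)
	}
	fmt.Println(dst.AsDuration()) // 3s
}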
0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x7c, 0x0a, - 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, - 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01, + 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, + 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index c25e4bd7d0d..c9ae92132aa 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -134,7 +134,16 @@ import ( // .setNanos((int) ((millis % 1000) * 1000000)).build(); // // -// Example 5: Compute Timestamp from current time in Python. +// Example 5: Compute Timestamp from Java `Instant.now()`. +// +// Instant now = Instant.now(); +// +// Timestamp timestamp = +// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) +// .setNanos(now.getNano()).build(); +// +// +// Example 6: Compute Timestamp from current time in Python. 
// // timestamp = Timestamp() // timestamp.GetCurrentTime() @@ -306,15 +315,15 @@ var file_google_protobuf_timestamp_proto_rawDesc = []byte{ 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, - 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, - 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, + 0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01, + 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, + 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index fe2b724f693..25483fad138 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -81,6 +81,7 @@ import ( "math" "os" "path/filepath" + "reflect" "runtime" "strconv" "strings" @@ -433,7 +434,7 @@ func InitFlags(flagset *flag.FlagSet) { flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") flagset.BoolVar(&logging.addDirHeader, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header of the log messages") flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages") - flagset.BoolVar(&logging.oneOutput, "one_output", logging.oneOutput, "If true, only write logs to their native severity level (vs also writing to each lower severity level") + flagset.BoolVar(&logging.oneOutput, "one_output", logging.oneOutput, "If true, only write logs to their native severity level (vs also writing to each lower severity level)") flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files") flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") @@ -780,7 
+781,7 @@ func (l *loggingT) errorS(err error, loggr logr.Logger, filter LogFilter, depth loggr.Error(err, msg, keysAndValues...) return } - l.printS(err, depth+1, msg, keysAndValues...) + l.printS(err, errorLog, depth+1, msg, keysAndValues...) } // if loggr is specified, will call loggr.Info, otherwise output with logging module. @@ -792,12 +793,12 @@ func (l *loggingT) infoS(loggr logr.Logger, filter LogFilter, depth int, msg str loggr.Info(msg, keysAndValues...) return } - l.printS(nil, depth+1, msg, keysAndValues...) + l.printS(nil, infoLog, depth+1, msg, keysAndValues...) } // printS is called from infoS and errorS if loggr is not specified. -// if err arguments is specified, will output to errorLog severity -func (l *loggingT) printS(err error, depth int, msg string, keysAndValues ...interface{}) { +// set log severity by s +func (l *loggingT) printS(err error, s severity, depth int, msg string, keysAndValues ...interface{}) { b := &bytes.Buffer{} b.WriteString(fmt.Sprintf("%q", msg)) if err != nil { @@ -805,12 +806,6 @@ func (l *loggingT) printS(err error, depth int, msg string, keysAndValues ...int b.WriteString(fmt.Sprintf("err=%q", err.Error())) } kvListFormat(b, keysAndValues...) - var s severity - if err == nil { - s = infoLog - } else { - s = errorLog - } l.printDepth(s, logging.logr, nil, depth+1, b) } @@ -1583,6 +1578,13 @@ type KMetadata interface { // KObj returns ObjectRef from ObjectMeta func KObj(obj KMetadata) ObjectRef { + if obj == nil { + return ObjectRef{} + } + if val := reflect.ValueOf(obj); val.Kind() == reflect.Ptr && val.IsNil() { + return ObjectRef{} + } + return ObjectRef{ Name: obj.GetName(), Namespace: obj.GetNamespace(), diff --git a/vendor/k8s.io/utils/pointer/pointer.go b/vendor/k8s.io/utils/pointer/pointer.go index 0a55a844ee7..1da6f6664a3 100644 --- a/vendor/k8s.io/utils/pointer/pointer.go +++ b/vendor/k8s.io/utils/pointer/pointer.go @@ -46,86 +46,182 @@ func AllPtrFieldsNil(obj interface{}) bool { return true } -// Int32Ptr returns a pointer to an int32 -func Int32Ptr(i int32) *int32 { +// Int32 returns a pointer to an int32. +func Int32(i int32) *int32 { return &i } -// Int32PtrDerefOr dereference the int32 ptr and returns it if not nil, -// else returns def. -func Int32PtrDerefOr(ptr *int32, def int32) int32 { +var Int32Ptr = Int32 // for back-compat + +// Int32Deref dereferences the int32 ptr and returns it if not nil, or else +// returns def. +func Int32Deref(ptr *int32, def int32) int32 { if ptr != nil { return *ptr } return def } -// Int64Ptr returns a pointer to an int64 -func Int64Ptr(i int64) *int64 { +var Int32PtrDerefOr = Int32Deref // for back-compat + +// Int32Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +func Int32Equal(a, b *int32) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} + +// Int64 returns a pointer to an int64. +func Int64(i int64) *int64 { return &i } -// Int64PtrDerefOr dereference the int64 ptr and returns it if not nil, -// else returns def. -func Int64PtrDerefOr(ptr *int64, def int64) int64 { +var Int64Ptr = Int64 // for back-compat + +// Int64Deref dereferences the int64 ptr and returns it if not nil, or else +// returns def. 
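// Illustrative sketch, not part of the vendored diff: the KObj change above
// returns an empty ObjectRef when obj is nil or wraps a typed-nil pointer,
// instead of panicking on obj.GetName(). A standalone version of the same
// check (metadata, object and safeName are hypothetical names):
package main

import (
	"fmt"
	"reflect"
)

type metadata interface{ GetName() string }

type object struct{ name string }

func (o *object) GetName() string { return o.name }

func safeName(obj metadata) string {
	if obj == nil {
		return "" // nil interface
	}
	if val := reflect.ValueOf(obj); val.Kind() == reflect.Ptr && val.IsNil() {
		return "" // non-nil interface wrapping a nil pointer
	}
	return obj.GetName()
}

func main() {
	var p *object
	fmt.Println(safeName(nil) == "")             // true
	fmt.Println(safeName(p) == "")               // true: typed-nil pointer
	fmt.Println(safeName(&object{name: "demo"})) // demo
}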
+func Int64Deref(ptr *int64, def int64) int64 { if ptr != nil { return *ptr } return def } -// BoolPtr returns a pointer to a bool -func BoolPtr(b bool) *bool { +var Int64PtrDerefOr = Int64Deref // for back-compat + +// Int64Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +func Int64Equal(a, b *int64) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} + +// Bool returns a pointer to a bool. +func Bool(b bool) *bool { return &b } -// BoolPtrDerefOr dereference the bool ptr and returns it if not nil, -// else returns def. -func BoolPtrDerefOr(ptr *bool, def bool) bool { +var BoolPtr = Bool // for back-compat + +// BoolDeref dereferences the bool ptr and returns it if not nil, or else +// returns def. +func BoolDeref(ptr *bool, def bool) bool { if ptr != nil { return *ptr } return def } -// StringPtr returns a pointer to the passed string. -func StringPtr(s string) *string { +var BoolPtrDerefOr = BoolDeref // for back-compat + +// BoolEqual returns true if both arguments are nil or both arguments +// dereference to the same value. +func BoolEqual(a, b *bool) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} + +// String returns a pointer to a string. +func String(s string) *string { return &s } -// StringPtrDerefOr dereference the string ptr and returns it if not nil, -// else returns def. -func StringPtrDerefOr(ptr *string, def string) string { +var StringPtr = String // for back-compat + +// StringDeref dereferences the string ptr and returns it if not nil, or else +// returns def. +func StringDeref(ptr *string, def string) string { if ptr != nil { return *ptr } return def } -// Float32Ptr returns a pointer to the passed float32. -func Float32Ptr(i float32) *float32 { +var StringPtrDerefOr = StringDeref // for back-compat + +// StringEqual returns true if both arguments are nil or both arguments +// dereference to the same value. +func StringEqual(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} + +// Float32 returns a pointer to the a float32. +func Float32(i float32) *float32 { return &i } -// Float32PtrDerefOr dereference the float32 ptr and returns it if not nil, -// else returns def. -func Float32PtrDerefOr(ptr *float32, def float32) float32 { +var Float32Ptr = Float32 + +// Float32Deref dereferences the float32 ptr and returns it if not nil, or else +// returns def. +func Float32Deref(ptr *float32, def float32) float32 { if ptr != nil { return *ptr } return def } -// Float64Ptr returns a pointer to the passed float64. -func Float64Ptr(i float64) *float64 { +var Float32PtrDerefOr = Float32Deref // for back-compat + +// Float32Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +func Float32Equal(a, b *float32) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} + +// Float64 returns a pointer to the a float64. +func Float64(i float64) *float64 { return &i } -// Float64PtrDerefOr dereference the float64 ptr and returns it if not nil, -// else returns def. -func Float64PtrDerefOr(ptr *float64, def float64) float64 { +var Float64Ptr = Float64 + +// Float64Deref dereferences the float64 ptr and returns it if not nil, or else +// returns def. 
+func Float64Deref(ptr *float64, def float64) float64 { if ptr != nil { return *ptr } return def } + +var Float64PtrDerefOr = Float64Deref // for back-compat + +// Float64Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +func Float64Equal(a, b *float64) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 8f7722d4ac7..8592c7db5b0 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,12 +1,11 @@ # 4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a 4d63.com/gochecknoglobals/checknoglobals -# cloud.google.com/go v0.77.0 -## explicit +# cloud.google.com/go v0.81.0 cloud.google.com/go/compute/metadata # github.com/AlekSi/gocov-xml v0.0.0-20190121064608-3a14fb1c4737 ## explicit github.com/AlekSi/gocov-xml -# github.com/Azure/azure-sdk-for-go v52.5.0+incompatible +# github.com/Azure/azure-sdk-for-go v53.2.0+incompatible ## explicit github.com/Azure/azure-sdk-for-go/profiles/latest/dns/mgmt/dns github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute @@ -65,11 +64,12 @@ github.com/Azure/go-autorest/tracing github.com/BurntSushi/toml # github.com/Djarvur/go-err113 v0.0.0-20200511133814-5174e21577d5 github.com/Djarvur/go-err113 -# github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd +# github.com/MakeNowJust/heredoc v1.0.0 +## explicit github.com/MakeNowJust/heredoc # github.com/Masterminds/semver v1.5.0 github.com/Masterminds/semver -# github.com/Microsoft/go-winio v0.4.16 +# github.com/Microsoft/go-winio v0.4.17 ## explicit github.com/Microsoft/go-winio github.com/Microsoft/go-winio/pkg/guid @@ -89,7 +89,7 @@ github.com/alvaroloes/enumer # github.com/apparentlymart/go-cidr v1.1.0 ## explicit github.com/apparentlymart/go-cidr/cidr -# github.com/aws/aws-sdk-go v1.37.12 +# github.com/aws/aws-sdk-go v1.38.20 ## explicit github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn @@ -168,7 +168,7 @@ github.com/codahale/etm github.com/containers/image/docker/reference github.com/containers/image/pkg/sysregistriesv2 github.com/containers/image/types -# github.com/containers/image/v5 v5.10.2 +# github.com/containers/image/v5 v5.11.0 ## explicit github.com/containers/image/v5/copy github.com/containers/image/v5/docker @@ -180,6 +180,7 @@ github.com/containers/image/v5/internal/iolimits github.com/containers/image/v5/internal/pkg/keyctl github.com/containers/image/v5/internal/pkg/platform github.com/containers/image/v5/internal/rootless +github.com/containers/image/v5/internal/types github.com/containers/image/v5/internal/uploadreader github.com/containers/image/v5/manifest github.com/containers/image/v5/pkg/blobinfocache @@ -201,7 +202,7 @@ github.com/containers/image/v5/version # github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a ## explicit github.com/containers/libtrust -# github.com/containers/ocicrypt v1.1.0 +# github.com/containers/ocicrypt v1.1.1 ## explicit github.com/containers/ocicrypt github.com/containers/ocicrypt/blockcipher @@ -217,7 +218,7 @@ github.com/containers/ocicrypt/keywrap/pkcs7 github.com/containers/ocicrypt/spec github.com/containers/ocicrypt/utils github.com/containers/ocicrypt/utils/keyprovider -# github.com/containers/storage v1.25.0 +# github.com/containers/storage v1.29.0 ## explicit github.com/containers/storage/pkg/homedir github.com/containers/storage/pkg/idtools @@ -232,9 +233,10 @@ github.com/containers/storage/pkg/unshare 
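// Illustrative sketch, not part of the vendored diff: the k8s.io/utils/pointer
// changes above rename the Int32Ptr/Int32PtrDerefOr-style helpers to
// Int32/Int32Deref, add *Equal helpers, and keep the old names as var aliases
// for backward compatibility:
package main

import (
	"fmt"

	"k8s.io/utils/pointer"
)

func main() {
	replicas := pointer.Int32(3)  // new name
	legacy := pointer.Int32Ptr(3) // old name still compiles via the alias

	fmt.Println(pointer.Int32Deref(replicas, 1))             // 3
	fmt.Println(pointer.Int32Deref(nil, 1))                  // 1: default used when the pointer is nil
	fmt.Println(pointer.Int32Equal(replicas, legacy))        // true: both dereference to 3
	fmt.Println(pointer.BoolEqual(nil, pointer.Bool(false))) // false: exactly one side is nil
}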
github.com/coreos/go-oidc # github.com/coreos/go-semver v0.3.0 github.com/coreos/go-semver/semver -# github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f +# github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf +## explicit github.com/coreos/go-systemd/unit -# github.com/coreos/go-systemd/v22 v22.1.0 +# github.com/coreos/go-systemd/v22 v22.3.1 ## explicit github.com/coreos/go-systemd/v22/journal github.com/coreos/go-systemd/v22/unit @@ -249,7 +251,8 @@ github.com/coreos/ignition/v2/config/shared/errors github.com/coreos/ignition/v2/config/shared/validations github.com/coreos/ignition/v2/config/util github.com/coreos/ignition/v2/config/v3_1/types -# github.com/coreos/vcontext v0.0.0-20201120045928-b0e13dab675c +# github.com/coreos/vcontext v0.0.0-20210407161507-4ee6c745c8bd +## explicit github.com/coreos/vcontext/path github.com/coreos/vcontext/report github.com/coreos/vcontext/tree @@ -273,7 +276,7 @@ github.com/docker/distribution/registry/client/auth/challenge github.com/docker/distribution/registry/client/transport github.com/docker/distribution/registry/storage/cache github.com/docker/distribution/registry/storage/cache/memory -# github.com/docker/docker v20.10.3+incompatible +# github.com/docker/docker v20.10.6+incompatible ## explicit github.com/docker/docker/api/types/versions # github.com/docker/docker-credential-helpers v0.6.3 @@ -296,7 +299,8 @@ github.com/emicklei/go-restful github.com/emicklei/go-restful/log # github.com/evanphx/json-patch v4.9.0+incompatible github.com/evanphx/json-patch -# github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d +# github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f +## explicit github.com/exponent-io/jsonpath # github.com/fatih/color v1.10.0 github.com/fatih/color @@ -320,21 +324,24 @@ github.com/go-critic/go-critic/framework/linter # github.com/go-logr/logr v0.4.0 ## explicit github.com/go-logr/logr -# github.com/go-openapi/jsonpointer v0.19.3 +# github.com/go-openapi/jsonpointer v0.19.5 +## explicit github.com/go-openapi/jsonpointer -# github.com/go-openapi/jsonreference v0.19.3 +# github.com/go-openapi/jsonreference v0.19.5 +## explicit github.com/go-openapi/jsonreference # github.com/go-openapi/spec v0.20.3 => github.com/go-openapi/spec v0.19.8 ## explicit github.com/go-openapi/spec -# github.com/go-openapi/swag v0.19.5 +# github.com/go-openapi/swag v0.19.15 +## explicit github.com/go-openapi/swag # github.com/go-playground/locales v0.13.0 github.com/go-playground/locales github.com/go-playground/locales/currency # github.com/go-playground/universal-translator v0.17.0 github.com/go-playground/universal-translator -# github.com/go-playground/validator/v10 v10.4.1 +# github.com/go-playground/validator/v10 v10.5.0 ## explicit github.com/go-playground/validator/v10 # github.com/go-test/deep v1.0.7 @@ -369,26 +376,28 @@ github.com/gobwas/glob/util/runes github.com/gobwas/glob/util/strings # github.com/gofrs/flock v0.8.0 github.com/gofrs/flock -# github.com/gofrs/uuid v3.3.0+incompatible +# github.com/gofrs/uuid v4.0.0+incompatible ## explicit github.com/gofrs/uuid # github.com/gogo/protobuf v1.3.2 -## explicit github.com/gogo/protobuf/proto github.com/gogo/protobuf/sortkeys -# github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e +# github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da +## explicit github.com/golang/groupcache/lru -# github.com/golang/mock v1.4.4 +# github.com/golang/mock v1.5.0 ## explicit github.com/golang/mock/gomock github.com/golang/mock/mockgen 
github.com/golang/mock/mockgen/model -# github.com/golang/protobuf v1.4.3 +# github.com/golang/protobuf v1.5.2 github.com/golang/protobuf/proto github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp +# github.com/golang/snappy v0.0.3 +github.com/golang/snappy # github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 github.com/golangci/check/cmd/structcheck github.com/golangci/check/cmd/varcheck @@ -453,9 +462,10 @@ github.com/golangci/prealloc github.com/golangci/revgrep # github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 github.com/golangci/unconvert -# github.com/google/btree v1.0.0 +# github.com/google/btree v1.0.1 +## explicit github.com/google/btree -# github.com/google/go-cmp v0.5.4 +# github.com/google/go-cmp v0.5.5 ## explicit github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/internal/diff @@ -477,7 +487,8 @@ github.com/googleapis/gnostic/compiler github.com/googleapis/gnostic/extensions github.com/googleapis/gnostic/jsonschema github.com/googleapis/gnostic/openapiv2 -# github.com/gophercloud/gophercloud v0.15.1-0.20210202035223-633d73521055 +# github.com/gophercloud/gophercloud v0.17.0 +## explicit github.com/gophercloud/gophercloud github.com/gophercloud/gophercloud/openstack github.com/gophercloud/gophercloud/openstack/common/extensions @@ -499,7 +510,7 @@ github.com/gophercloud/gophercloud/openstack/networking/v2/networks github.com/gophercloud/gophercloud/openstack/networking/v2/subnets github.com/gophercloud/gophercloud/openstack/utils github.com/gophercloud/gophercloud/pagination -# github.com/gophercloud/utils v0.0.0-20210216074907-f6de111f2eae +# github.com/gophercloud/utils v0.0.0-20210323225332-7b186010c04f ## explicit github.com/gophercloud/utils/env github.com/gophercloud/utils/gnocchi @@ -533,6 +544,11 @@ github.com/gregjones/httpcache/diskcache github.com/h2non/filetype/matchers github.com/h2non/filetype/matchers/isobmff github.com/h2non/filetype/types +# github.com/hashicorp/errwrap v1.1.0 +## explicit +github.com/hashicorp/errwrap +# github.com/hashicorp/go-multierror v1.1.1 +github.com/hashicorp/go-multierror # github.com/hashicorp/golang-lru v0.5.4 github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru @@ -547,7 +563,8 @@ github.com/hashicorp/hcl/hcl/token github.com/hashicorp/hcl/json/parser github.com/hashicorp/hcl/json/scanner github.com/hashicorp/hcl/json/token -# github.com/imdario/mergo v0.3.11 +# github.com/imdario/mergo v0.3.12 +## explicit github.com/imdario/mergo # github.com/inconshreveable/mousetrap v1.0.0 github.com/inconshreveable/mousetrap @@ -575,11 +592,11 @@ github.com/kballard/go-shellquote # github.com/kisielk/gotool v1.0.0 github.com/kisielk/gotool github.com/kisielk/gotool/internal/load -# github.com/klauspost/compress v1.11.7 +# github.com/klauspost/compress v1.12.1 +## explicit github.com/klauspost/compress/flate github.com/klauspost/compress/fse github.com/klauspost/compress/huff0 -github.com/klauspost/compress/snappy github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash # github.com/klauspost/pgzip v1.2.5 @@ -594,10 +611,6 @@ github.com/leodido/go-urn github.com/libvirt/libvirt-go # github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de github.com/liggitt/tabwriter -# github.com/magefile/mage v1.11.0 -## explicit -github.com/magefile/mage/mg -github.com/magefile/mage/sh # github.com/magiconair/properties v1.8.1 github.com/magiconair/properties # 
github.com/mailru/easyjson v0.7.7
@@ -613,7 +626,7 @@ github.com/matoous/godox
 github.com/mattn/go-colorable
 # github.com/mattn/go-isatty v0.0.12
 github.com/mattn/go-isatty
-# github.com/mattn/go-runewidth v0.0.10
+# github.com/mattn/go-runewidth v0.0.12
 ## explicit
 github.com/mattn/go-runewidth
 # github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369
@@ -624,7 +637,7 @@ github.com/mbilski/exhaustivestruct/pkg/analyzer
 github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1
 github.com/metal3-io/baremetal-operator/pkg/bmc
 github.com/metal3-io/baremetal-operator/pkg/hardware
-# github.com/metal3-io/cluster-api-provider-baremetal v0.2.2 => github.com/openshift/cluster-api-provider-baremetal v0.0.0-20201105032354-fcd9e769a45c
+# github.com/metal3-io/cluster-api-provider-baremetal v0.2.2 => github.com/openshift/cluster-api-provider-baremetal v0.0.0-20210304234017-16b67b78b538
 ## explicit
 github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis
 github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1
@@ -635,13 +648,15 @@ github.com/mgutz/ansi
 github.com/miekg/pkcs11
 # github.com/mitchellh/go-homedir v1.1.0
 github.com/mitchellh/go-homedir
-# github.com/mitchellh/go-wordwrap v1.0.0
+# github.com/mitchellh/go-wordwrap v1.0.1
+## explicit
 github.com/mitchellh/go-wordwrap
 # github.com/mitchellh/mapstructure v1.3.0
 github.com/mitchellh/mapstructure
-# github.com/moby/sys/mountinfo v0.4.0
+# github.com/moby/sys/mountinfo v0.4.1
 github.com/moby/sys/mountinfo
-# github.com/moby/term v0.0.0-20200312100748-672ec06f55cd
+# github.com/moby/term v0.0.0-20201216013528-df9cb8a40635
+## explicit
 github.com/moby/term
 github.com/moby/term/windows
 # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
@@ -669,16 +684,16 @@ github.com/nbutton23/zxcvbn-go/utils/math
 # github.com/nishanths/exhaustive v0.1.0
 github.com/nishanths/exhaustive
 # github.com/nxadm/tail v1.4.8
-## explicit
 github.com/nxadm/tail
 github.com/nxadm/tail/ratelimiter
 github.com/nxadm/tail/util
 github.com/nxadm/tail/watch
 github.com/nxadm/tail/winfile
-# github.com/onsi/ginkgo v1.15.0
+# github.com/onsi/ginkgo v1.16.1
 ## explicit
 github.com/onsi/ginkgo
 github.com/onsi/ginkgo/config
+github.com/onsi/ginkgo/formatter
 github.com/onsi/ginkgo/internal/codelocation
 github.com/onsi/ginkgo/internal/containernode
 github.com/onsi/ginkgo/internal/failer
@@ -696,7 +711,7 @@ github.com/onsi/ginkgo/reporters/stenographer
 github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable
 github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty
 github.com/onsi/ginkgo/types
-# github.com/onsi/gomega v1.10.5
+# github.com/onsi/gomega v1.11.0
 ## explicit
 github.com/onsi/gomega
 github.com/onsi/gomega/format
@@ -715,9 +730,9 @@ github.com/opencontainers/go-digest
 # github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
 github.com/opencontainers/image-spec/specs-go
 github.com/opencontainers/image-spec/specs-go/v1
-# github.com/opencontainers/runc v1.0.0-rc91.0.20200707015106-819fcc687efb
+# github.com/opencontainers/runc v1.0.0-rc93
 github.com/opencontainers/runc/libcontainer/user
-# github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2
+# github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d
 github.com/opencontainers/runtime-spec/specs-go
 # github.com/openshift/api v3.9.1-0.20191111211345-a27ff30ebf09+incompatible => github.com/openshift/api v0.0.0-20210127195806-54e5e88cf848
 ## explicit
@@ -753,7 +768,7 @@ github.com/openshift/client-go/samples/clientset/versioned/typed/samples/v1
 github.com/openshift/client-go/security/clientset/versioned
 github.com/openshift/client-go/security/clientset/versioned/scheme
 github.com/openshift/client-go/security/clientset/versioned/typed/security/v1
-# github.com/openshift/cloud-credential-operator v0.0.0-20210217002926-dce99f70f22c => github.com/openshift/cloud-credential-operator v0.0.0-20201202215507-371eb009d9a1
+# github.com/openshift/cloud-credential-operator v0.0.0-20210415025441-3b263dd9f565 => github.com/openshift/cloud-credential-operator v0.0.0-20201202215507-371eb009d9a1
 ## explicit
 github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1
 github.com/openshift/cloud-credential-operator/pkg/aws
@@ -761,19 +776,19 @@ github.com/openshift/cloud-credential-operator/pkg/version
 # github.com/openshift/cluster-api v0.0.0-20191129101638-b09907ac6668 => github.com/openshift/cluster-api v0.0.0-20191129101638-b09907ac6668
 github.com/openshift/cluster-api/pkg/apis/machine/common
 github.com/openshift/cluster-api/pkg/apis/machine/v1beta1
-# github.com/openshift/cluster-api-provider-baremetal v0.0.0-20210126223742-4e254d08e0ad
+# github.com/openshift/cluster-api-provider-baremetal v0.0.0-20210322184723-0e61e5b3c9a5
 ## explicit
 github.com/openshift/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1
-# github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201002065957-9854f7420570 => github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201002153134-a0fc9aa4ce81
+# github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201002065957-9854f7420570 => github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20210324200451-9b15b1192f37
 github.com/openshift/cluster-api-provider-gcp/pkg/apis
 github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1
-# github.com/openshift/cluster-api-provider-libvirt v0.2.1-0.20191219173431-2336783d4603 => github.com/openshift/cluster-api-provider-libvirt v0.2.1-0.20200919090150-1ca52adab176
+# github.com/openshift/cluster-api-provider-libvirt v0.2.1-0.20191219173431-2336783d4603 => github.com/openshift/cluster-api-provider-libvirt v0.2.1-0.20210326022320-b3e2718f198e
 github.com/openshift/cluster-api-provider-libvirt/pkg/apis
 github.com/openshift/cluster-api-provider-libvirt/pkg/apis/libvirtproviderconfig/v1beta1
 # github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20200504092944-27473ea1ae43 => github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20210210114935-91f12f3f7dee
 github.com/openshift/cluster-api-provider-ovirt/pkg/apis
 github.com/openshift/cluster-api-provider-ovirt/pkg/apis/ovirtprovider/v1beta1
-# github.com/openshift/console-operator v0.0.0-20210216151626-6e1cbc849915 => github.com/openshift/console-operator v0.0.0-20210116095614-7fd78a283616
+# github.com/openshift/console-operator v0.0.0-20210414121046-73358d9408c4 => github.com/openshift/console-operator v0.0.0-20210116095614-7fd78a283616
 ## explicit
 github.com/openshift/console-operator/pkg/api
 # github.com/openshift/installer v0.16.1 => github.com/jim-minter/installer v0.9.0-master.0.20210407195006-452c09da58ae
@@ -875,10 +890,10 @@ github.com/openshift/installer/pkg/types/vsphere/defaults
 github.com/openshift/installer/pkg/types/vsphere/validation
 github.com/openshift/installer/pkg/validate
 github.com/openshift/installer/pkg/version
-# github.com/openshift/library-go v0.0.0-20200909173121-1d055d971916
+# github.com/openshift/library-go v0.0.0-20210414082648-6e767630a0dc => github.com/openshift/library-go v0.0.0-20210204141222-0e7715cd7725
 ## explicit
 github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers
-# github.com/openshift/machine-api-operator v0.2.1-0.20210104142355-8e6ae0acdfcf => github.com/openshift/machine-api-operator v0.2.1-0.20210212025836-cb508cd8777d
+# github.com/openshift/machine-api-operator v0.2.1-0.20210212025836-cb508cd8777d => github.com/openshift/machine-api-operator v0.2.1-0.20210304062120-b5fb76b27015
 ## explicit
 github.com/openshift/machine-api-operator/pkg/apis/machine
 github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1
@@ -889,7 +904,7 @@ github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/fake
 github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/scheme
 github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/typed/machine/v1beta1
 github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/typed/machine/v1beta1/fake
-# github.com/openshift/machine-config-operator v4.2.0-alpha.0.0.20190917115525-033375cbe820+incompatible => github.com/openshift/machine-config-operator v0.0.1-0.20210211205336-14a2b82d9f4c
+# github.com/openshift/machine-config-operator v4.2.0-alpha.0.0.20190917115525-033375cbe820+incompatible => github.com/openshift/machine-config-operator v0.0.1-0.20210331171350-d5dc2b519aed
 ## explicit
 github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1
 github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned
@@ -897,7 +912,7 @@ github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/f
 github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/scheme
 github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/typed/machineconfiguration.openshift.io/v1
 github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/typed/machineconfiguration.openshift.io/v1/fake
-# github.com/operator-framework/operator-sdk v1.4.2 => github.com/operator-framework/operator-sdk v0.19.4
+# github.com/operator-framework/operator-sdk v1.6.1 => github.com/operator-framework/operator-sdk v0.19.4
 ## explicit
 github.com/operator-framework/operator-sdk/pkg/status
 # github.com/ovirt/go-ovirt v0.0.0-20210112072624-e4d3b104de71
@@ -920,24 +935,23 @@ github.com/pkg/errors
 github.com/pmezard/go-difflib/difflib
 # github.com/polyfloyd/go-errorlint v0.0.0-20201006195004-351e25ade6e3
 github.com/polyfloyd/go-errorlint/errorlint
-# github.com/pquerna/cachecontrol v0.0.0-20201205024021-ac21108117ac
+# github.com/pquerna/cachecontrol v0.1.0
 ## explicit
 github.com/pquerna/cachecontrol
 github.com/pquerna/cachecontrol/cacheobject
-# github.com/prometheus/client_golang v1.9.0
+# github.com/prometheus/client_golang v1.10.0
 ## explicit
 github.com/prometheus/client_golang/prometheus
 github.com/prometheus/client_golang/prometheus/internal
 github.com/prometheus/client_golang/prometheus/promhttp
 # github.com/prometheus/client_model v0.2.0
 github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.15.0
+# github.com/prometheus/common v0.20.0
 ## explicit
 github.com/prometheus/common/expfmt
 github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
 github.com/prometheus/common/model
 # github.com/prometheus/procfs v0.6.0
-## explicit
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util
@@ -949,9 +963,9 @@ github.com/quasilyte/go-ruleguard/ruleguard/typematch
 # github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95
 github.com/quasilyte/regex/syntax
 # github.com/rivo/uniseg v0.2.0
-## explicit
 github.com/rivo/uniseg
-# github.com/russross/blackfriday v1.5.2
+# github.com/russross/blackfriday v1.6.0
+## explicit
 github.com/russross/blackfriday
 # github.com/ryancurrah/gomodguard v1.1.0
 github.com/ryancurrah/gomodguard
@@ -962,7 +976,7 @@ github.com/securego/gosec/v2
 github.com/securego/gosec/v2/rules
 # github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c
 github.com/shazow/go-diff/difflib
-# github.com/sirupsen/logrus v1.7.1
+# github.com/sirupsen/logrus v1.8.1
 ## explicit
 github.com/sirupsen/logrus
 github.com/sirupsen/logrus/hooks/test
@@ -977,7 +991,8 @@ github.com/spf13/afero
 github.com/spf13/afero/mem
 # github.com/spf13/cast v1.3.1
 github.com/spf13/cast
-# github.com/spf13/cobra v1.1.1
+# github.com/spf13/cobra v1.1.3
+## explicit
 github.com/spf13/cobra
 # github.com/spf13/jwalterweatherman v1.1.0
 github.com/spf13/jwalterweatherman
@@ -997,7 +1012,6 @@ github.com/stretchr/testify/mock
 # github.com/subosito/gotenv v1.2.0
 github.com/subosito/gotenv
 # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
-## explicit
 github.com/syndtr/gocapability/capability
 # github.com/tdakkota/asciicheck v0.0.0-20200416190851-d7f85be797a2
 github.com/tdakkota/asciicheck
@@ -1011,11 +1025,10 @@ github.com/tomarrell/wrapcheck/wrapcheck
 github.com/tommy-muehle/go-mnd
 github.com/tommy-muehle/go-mnd/checks
 github.com/tommy-muehle/go-mnd/config
-# github.com/ugorji/go/codec v1.2.5-0.20210320190651-a2bb12368408 => github.com/ugorji/go/codec v1.2.2
+# github.com/ugorji/go/codec v1.2.5 => github.com/ugorji/go/codec v1.2.2
 ## explicit
 github.com/ugorji/go/codec
 # github.com/ulikunitz/xz v0.5.10
-## explicit
 github.com/ulikunitz/xz
 github.com/ulikunitz/xz/internal/hash
 github.com/ulikunitz/xz/internal/xlog
@@ -1026,14 +1039,14 @@ github.com/ultraware/funlen
 github.com/ultraware/whitespace
 # github.com/uudashr/gocognit v1.0.1
 github.com/uudashr/gocognit
-# github.com/vbauerster/mpb/v5 v5.4.0
-github.com/vbauerster/mpb/v5
-github.com/vbauerster/mpb/v5/cwriter
-github.com/vbauerster/mpb/v5/decor
-github.com/vbauerster/mpb/v5/internal
+# github.com/vbauerster/mpb/v6 v6.0.3
+github.com/vbauerster/mpb/v6
+github.com/vbauerster/mpb/v6/cwriter
+github.com/vbauerster/mpb/v6/decor
+github.com/vbauerster/mpb/v6/internal
 # github.com/vincent-petithory/dataurl v0.0.0-20191104211930-d1553a71de50
 github.com/vincent-petithory/dataurl
-# github.com/vmware/govmomi v0.24.0
+# github.com/vmware/govmomi v0.24.1
 ## explicit
 github.com/vmware/govmomi
 github.com/vmware/govmomi/find
@@ -1059,8 +1072,7 @@ github.com/vmware/govmomi/vim25/xml
 go.etcd.io/bbolt
 # go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1
 go.mozilla.org/pkcs7
-# go.opencensus.io v0.22.6
-## explicit
+# go.opencensus.io v0.23.0
 go.opencensus.io
 go.opencensus.io/internal
 go.opencensus.io/internal/tagencoding
@@ -1077,7 +1089,7 @@ go.opencensus.io/trace
 go.opencensus.io/trace/internal
 go.opencensus.io/trace/propagation
 go.opencensus.io/trace/tracestate
-# golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
+# golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2
 ## explicit
 golang.org/x/crypto/bcrypt
 golang.org/x/crypto/blowfish
@@ -1103,12 +1115,13 @@ golang.org/x/crypto/ssh
 golang.org/x/crypto/ssh/agent
 golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
 golang.org/x/crypto/ssh/terminal
-# golang.org/x/mod v0.4.1
+# golang.org/x/mod v0.4.2
+## explicit
 golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/modfile
 golang.org/x/mod/module
 golang.org/x/mod/semver
-# golang.org/x/net v0.0.0-20210119194325-5f4716e94777
+# golang.org/x/net v0.0.0-20210414194228-064579744ee0
 ## explicit
 golang.org/x/net/context
 golang.org/x/net/context/ctxhttp
@@ -1123,7 +1136,7 @@ golang.org/x/net/internal/socks
 golang.org/x/net/internal/timeseries
 golang.org/x/net/proxy
 golang.org/x/net/trace
-# golang.org/x/oauth2 v0.0.0-20210216194517-16ff1888fd2e
+# golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78
 ## explicit
 golang.org/x/oauth2
 golang.org/x/oauth2/google
@@ -1132,11 +1145,11 @@ golang.org/x/oauth2/internal
 golang.org/x/oauth2/jws
 golang.org/x/oauth2/jwt
 golang.org/x/oauth2/microsoft
-# golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
+# golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
 ## explicit
 golang.org/x/sync/errgroup
 golang.org/x/sync/semaphore
-# golang.org/x/sys v0.0.0-20210217105451-b926d437f341
+# golang.org/x/sys v0.0.0-20210415045647-66c3f260301c
 ## explicit
 golang.org/x/sys/cpu
 golang.org/x/sys/execabs
@@ -1144,10 +1157,10 @@ golang.org/x/sys/internal/unsafeheader
 golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
-# golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf
+# golang.org/x/term v0.0.0-20210406210042-72f3dc4e9b72
 ## explicit
 golang.org/x/term
-# golang.org/x/text v0.3.5
+# golang.org/x/text v0.3.6
 golang.org/x/text/encoding
 golang.org/x/text/encoding/charmap
 golang.org/x/text/encoding/htmlindex
@@ -1169,7 +1182,7 @@ golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
 golang.org/x/text/width
-# golang.org/x/time v0.0.0-20201208040808-7e3f01d25324
+# golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba
 ## explicit
 golang.org/x/time/rate
 # golang.org/x/tools v0.1.0
@@ -1244,7 +1257,8 @@ golang.org/x/xerrors/internal
 # gomodules.xyz/jsonpatch/v2 v2.1.0
 ## explicit
 gomodules.xyz/jsonpatch/v2
-# google.golang.org/api v0.40.0
+# google.golang.org/api v0.44.0
+## explicit
 google.golang.org/api/cloudresourcemanager/v1
 google.golang.org/api/compute/v1
 google.golang.org/api/dns/v1
@@ -1272,9 +1286,11 @@ google.golang.org/appengine/internal/modules
 google.golang.org/appengine/internal/remote_api
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20210212180131-e7f2df4ecc2d
+# google.golang.org/genproto v0.0.0-20210414175830-92282443c685
+## explicit
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.35.0
+# google.golang.org/grpc v1.37.0
+## explicit
 google.golang.org/grpc
 google.golang.org/grpc/attributes
 google.golang.org/grpc/backoff
@@ -1319,7 +1335,7 @@ google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
 google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.25.0
+# google.golang.org/protobuf v1.26.0
 google.golang.org/protobuf/encoding/prototext
 google.golang.org/protobuf/encoding/protowire
 google.golang.org/protobuf/internal/descfmt
@@ -1330,22 +1346,23 @@ google.golang.org/protobuf/internal/encoding/messageset
 google.golang.org/protobuf/internal/encoding/tag
 google.golang.org/protobuf/internal/encoding/text
 google.golang.org/protobuf/internal/errors
-google.golang.org/protobuf/internal/fieldsort
 google.golang.org/protobuf/internal/filedesc
 google.golang.org/protobuf/internal/filetype
 google.golang.org/protobuf/internal/flags
 google.golang.org/protobuf/internal/genid
 google.golang.org/protobuf/internal/impl
-google.golang.org/protobuf/internal/mapsort
+google.golang.org/protobuf/internal/order
 google.golang.org/protobuf/internal/pragma
 google.golang.org/protobuf/internal/set
 google.golang.org/protobuf/internal/strs
 google.golang.org/protobuf/internal/version
 google.golang.org/protobuf/proto
+google.golang.org/protobuf/reflect/protodesc
 google.golang.org/protobuf/reflect/protoreflect
 google.golang.org/protobuf/reflect/protoregistry
 google.golang.org/protobuf/runtime/protoiface
 google.golang.org/protobuf/runtime/protoimpl
+google.golang.org/protobuf/types/descriptorpb
 google.golang.org/protobuf/types/known/anypb
 google.golang.org/protobuf/types/known/durationpb
 google.golang.org/protobuf/types/known/timestamppb
@@ -1398,7 +1415,7 @@ honnef.co/go/tools/staticcheck
 honnef.co/go/tools/stylecheck
 honnef.co/go/tools/unused
 honnef.co/go/tools/version
-# k8s.io/api v0.20.2 => k8s.io/api v0.19.0-rc.2
+# k8s.io/api v0.21.0 => k8s.io/api v0.19.0-rc.2
 ## explicit
 k8s.io/api/admission/v1
 k8s.io/api/admission/v1beta1
@@ -1444,7 +1461,7 @@ k8s.io/api/settings/v1alpha1
 k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
-# k8s.io/apiextensions-apiserver v0.20.2 => k8s.io/apiextensions-apiserver v0.19.0-rc.2
+# k8s.io/apiextensions-apiserver v0.21.0 => k8s.io/apiextensions-apiserver v0.19.0-rc.2
 ## explicit
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
@@ -1453,7 +1470,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset
 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme
 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1
 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1
-# k8s.io/apimachinery v0.20.2 => k8s.io/apimachinery v0.19.0-rc.2
+# k8s.io/apimachinery v0.21.0 => k8s.io/apimachinery v0.19.0-rc.2
 ## explicit
 k8s.io/apimachinery/pkg/api/equality
 k8s.io/apimachinery/pkg/api/errors
@@ -1509,10 +1526,12 @@ k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/apimachinery/third_party/forked/golang/netutil
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/apiserver v0.19.0 => k8s.io/apiserver v0.19.0-rc.2
+# k8s.io/apiserver v0.21.0 => k8s.io/apiserver v0.19.0-rc.2
+## explicit
 k8s.io/apiserver/pkg/features
 k8s.io/apiserver/pkg/util/feature
-# k8s.io/cli-runtime v0.19.0-rc.2 => k8s.io/cli-runtime v0.19.0-rc.2
+# k8s.io/cli-runtime v0.21.0 => k8s.io/cli-runtime v0.19.0-rc.2
+## explicit
 k8s.io/cli-runtime/pkg/genericclioptions
 k8s.io/cli-runtime/pkg/kustomize
 k8s.io/cli-runtime/pkg/kustomize/k8sdeps
@@ -1674,7 +1693,8 @@ k8s.io/code-generator/cmd/client-gen/path
 k8s.io/code-generator/cmd/client-gen/types
 k8s.io/code-generator/pkg/namer
 k8s.io/code-generator/pkg/util
-# k8s.io/component-base v0.19.2 => k8s.io/component-base v0.19.0-rc.2
+# k8s.io/component-base v0.21.0 => k8s.io/component-base v0.19.0-rc.2
+## explicit
 k8s.io/component-base/featuregate
 k8s.io/component-base/version
 # k8s.io/gengo v0.0.0-20200518160137-fb547a11e5e0
@@ -1685,15 +1705,15 @@ k8s.io/gengo/parser
 k8s.io/gengo/types
 # k8s.io/klog v1.0.0
 k8s.io/klog
-# k8s.io/klog/v2 v2.5.0
+# k8s.io/klog/v2 v2.8.0
 ## explicit
 k8s.io/klog/v2
-# k8s.io/kube-openapi v0.0.0-20210216185858-15cd8face8d6 => k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6
+# k8s.io/kube-openapi v0.0.0-20210323165736-1a6458611d18 => k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6
 ## explicit
 k8s.io/kube-openapi/pkg/common
 k8s.io/kube-openapi/pkg/util/proto
 k8s.io/kube-openapi/pkg/util/proto/validation
-# k8s.io/kubectl v0.19.4 => k8s.io/kubectl v0.19.0-rc.2
+# k8s.io/kubectl v0.21.0 => k8s.io/kubectl v0.19.0-rc.2
 ## explicit
 k8s.io/kubectl/pkg/cmd/util
 k8s.io/kubectl/pkg/drain
@@ -1704,7 +1724,7 @@ k8s.io/kubectl/pkg/util/openapi/validation
 k8s.io/kubectl/pkg/util/templates
 k8s.io/kubectl/pkg/util/term
 k8s.io/kubectl/pkg/validation
-# k8s.io/kubernetes v1.13.0 => k8s.io/kubernetes v1.19.0-rc.2
+# k8s.io/kubernetes v1.21.0 => k8s.io/kubernetes v1.19.0-rc.2
 ## explicit
 k8s.io/kubernetes/pkg/apis/apps
 k8s.io/kubernetes/pkg/apis/apps/v1
@@ -1715,7 +1735,7 @@ k8s.io/kubernetes/pkg/apis/rbac
 k8s.io/kubernetes/pkg/apis/rbac/v1
 k8s.io/kubernetes/pkg/features
 k8s.io/kubernetes/pkg/util/parsers
-# k8s.io/utils v0.0.0-20210111153108-fddb29f9d009
+# k8s.io/utils v0.0.0-20210305010621-2afb4311ab10
 ## explicit
 k8s.io/utils/buffer
 k8s.io/utils/exec
@@ -1731,19 +1751,19 @@ mvdan.cc/interfacer/check
 mvdan.cc/lint
 # mvdan.cc/unparam v0.0.0-20200501210554-b37ab49443f7
 mvdan.cc/unparam/check
-# sigs.k8s.io/cluster-api-provider-aws v0.6.4 => github.com/openshift/cluster-api-provider-aws v0.2.1-0.20201002185235-b1a6ba661ed8
+# sigs.k8s.io/cluster-api-provider-aws v0.6.4 => github.com/openshift/cluster-api-provider-aws v0.2.1-0.20210325053523-878b3a16e964
 ## explicit
 sigs.k8s.io/cluster-api-provider-aws/pkg/apis
 sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1beta1
-# sigs.k8s.io/cluster-api-provider-azure v0.4.12 => github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20201119004617-db9109863f2f
+# sigs.k8s.io/cluster-api-provider-azure v0.4.14 => github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20210324211250-7a14c7f1a41a
 ## explicit
 sigs.k8s.io/cluster-api-provider-azure/pkg/apis
 sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1
-# sigs.k8s.io/cluster-api-provider-openstack v0.3.3 => github.com/openshift/cluster-api-provider-openstack v0.0.0-20201002114634-3622a0ce6b56
+# sigs.k8s.io/cluster-api-provider-openstack v0.3.4 => github.com/openshift/cluster-api-provider-openstack v0.0.0-20210318183513-48b73415908a
 ## explicit
 sigs.k8s.io/cluster-api-provider-openstack/pkg/apis
 sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1
-# sigs.k8s.io/controller-runtime v0.8.2 => sigs.k8s.io/controller-runtime v0.6.4
+# sigs.k8s.io/controller-runtime v0.8.3 => sigs.k8s.io/controller-runtime v0.6.4
 ## explicit
 sigs.k8s.io/controller-runtime
 sigs.k8s.io/controller-runtime/pkg/builder
@@ -1827,18 +1847,18 @@ sigs.k8s.io/yaml
 # github.com/docker/spdystream => github.com/docker/spdystream v0.1.0
 # github.com/go-openapi/spec => github.com/go-openapi/spec v0.19.8
 # github.com/metal3-io/baremetal-operator => github.com/openshift/baremetal-operator v0.0.0-20210128152529-b4b10a088a0c
-# github.com/metal3-io/cluster-api-provider-baremetal => github.com/openshift/cluster-api-provider-baremetal v0.0.0-20201105032354-fcd9e769a45c
+# github.com/metal3-io/cluster-api-provider-baremetal => github.com/openshift/cluster-api-provider-baremetal v0.0.0-20210304234017-16b67b78b538
 # github.com/openshift/api => github.com/openshift/api v0.0.0-20210127195806-54e5e88cf848
 # github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20200929181438-91d71ef2122c
 # github.com/openshift/cloud-credential-operator => github.com/openshift/cloud-credential-operator v0.0.0-20201202215507-371eb009d9a1
 # github.com/openshift/cluster-api => github.com/openshift/cluster-api v0.0.0-20191129101638-b09907ac6668
-# github.com/openshift/cluster-api-provider-gcp => github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201002153134-a0fc9aa4ce81
-# github.com/openshift/cluster-api-provider-libvirt => github.com/openshift/cluster-api-provider-libvirt v0.2.1-0.20200919090150-1ca52adab176
+# github.com/openshift/cluster-api-provider-gcp => github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20210324200451-9b15b1192f37
+# github.com/openshift/cluster-api-provider-libvirt => github.com/openshift/cluster-api-provider-libvirt v0.2.1-0.20210326022320-b3e2718f198e
 # github.com/openshift/cluster-api-provider-ovirt => github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20210210114935-91f12f3f7dee
 # github.com/openshift/console-operator => github.com/openshift/console-operator v0.0.0-20210116095614-7fd78a283616
 # github.com/openshift/installer => github.com/jim-minter/installer v0.9.0-master.0.20210407195006-452c09da58ae
-# github.com/openshift/machine-api-operator => github.com/openshift/machine-api-operator v0.2.1-0.20210212025836-cb508cd8777d
-# github.com/openshift/machine-config-operator => github.com/openshift/machine-config-operator v0.0.1-0.20210211205336-14a2b82d9f4c
+# github.com/openshift/machine-api-operator => github.com/openshift/machine-api-operator v0.2.1-0.20210304062120-b5fb76b27015
+# github.com/openshift/machine-config-operator => github.com/openshift/machine-config-operator v0.0.1-0.20210331171350-d5dc2b519aed
 # github.com/operator-framework/operator-sdk => github.com/operator-framework/operator-sdk v0.19.4
 # github.com/satori/go.uuid => github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b
 # github.com/satori/uuid => github.com/satori/uuid v1.2.1-0.20181028125025-b2ce2384e17b
@@ -1869,9 +1889,10 @@ k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.19.0-rc.2
 # k8s.io/metrics => k8s.io/metrics v0.19.0-rc.2
 # k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.19.0-rc.2
-# sigs.k8s.io/cluster-api-provider-aws => github.com/openshift/cluster-api-provider-aws v0.2.1-0.20201002185235-b1a6ba661ed8
-# sigs.k8s.io/cluster-api-provider-azure => github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20201119004617-db9109863f2f
-# sigs.k8s.io/cluster-api-provider-gcp => github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201002153134-a0fc9aa4ce81
-# sigs.k8s.io/cluster-api-provider-openstack => github.com/openshift/cluster-api-provider-openstack v0.0.0-20201002114634-3622a0ce6b56
+# sigs.k8s.io/cluster-api-provider-aws => github.com/openshift/cluster-api-provider-aws v0.2.1-0.20210325053523-878b3a16e964
+# sigs.k8s.io/cluster-api-provider-azure => github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20210324211250-7a14c7f1a41a
+# sigs.k8s.io/cluster-api-provider-gcp => github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20210324200451-9b15b1192f37
+# sigs.k8s.io/cluster-api-provider-openstack => github.com/openshift/cluster-api-provider-openstack v0.0.0-20210318183513-48b73415908a
 # sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.6.4
 # sigs.k8s.io/controller-tools => sigs.k8s.io/controller-tools v0.5.0
+# github.com/openshift/library-go => github.com/openshift/library-go v0.0.0-20210204141222-0e7715cd7725